mirror of https://github.com/k3d-io/k3d
Merge pull request #275 from rancher/add-k3d-tools
Integrating k3d-tools repo as Go submodulepull/279/head
commit
e7ffd18fca
@ -0,0 +1,7 @@ |
|||||||
|
.github/ |
||||||
|
.local/ |
||||||
|
bin/ |
||||||
|
_dist/ |
||||||
|
tools/ |
||||||
|
proxy/ |
||||||
|
site/ |
@ -0,0 +1,11 @@ |
|||||||
|
# --- Stage 1: build the k3d-tools binary ---
FROM golang:1.14 as builder
WORKDIR /app
COPY . .
ENV GO111MODULE=on
# CGO disabled so the resulting binary is statically linked and can run
# in the minimal busybox image below.
ENV CGO_ENABLED=0
RUN make build

# --- Stage 2: minimal runtime image containing only the binary ---
FROM busybox:1.31
WORKDIR /app
COPY --from=builder /app/bin/k3d-tools .
ENTRYPOINT [ "/app/k3d-tools"]
@ -0,0 +1,77 @@ |
|||||||
|
SHELL := /bin/bash

# get git tag
# Prefer an annotated tag; fall back to the abbreviated commit hash when
# the repository has no tags.
GIT_TAG := $(shell git describe --tags)
ifeq ($(GIT_TAG),)
GIT_TAG := $(shell git describe --always)
endif

# Go options
GO ?= go
# NOTE(review): this assignment runs 'go mod vendor' purely for its side
# effect (populating the vendor/ dir) and the result is never used —
# confirm this is intended; a dedicated target would be clearer.
PKG := $(shell go mod vendor)
TAGS :=
TESTS := .
TESTFLAGS :=
# -w -s strips debug info; -X embeds the git tag as the version string.
LDFLAGS := -w -s -X github.com/iwilltry42/k3d-tools/version.Version=${GIT_TAG}
GOFLAGS :=
BINDIR := $(CURDIR)/bin
BINARIES := k3d-tools

# Go Package required
PKG_GOX := github.com/mitchellh/gox
PKG_GOLANGCI_LINT := github.com/golangci/golangci-lint/cmd/golangci-lint

export GO111MODULE=on
export CGO_ENABLED=0

# go source directories.
# DIRS defines a single level directly, we only look at *.go in this directory.
# REC_DIRS defines a source code tree. All go files are analyzed recursively.
DIRS := .
REC_DIRS := cmd

# Rules for finding all go source files using 'DIRS' and 'REC_DIRS'
GO_SRC := $(foreach dir,$(DIRS),$(wildcard $(dir)/*.go))
GO_SRC += $(foreach dir,$(REC_DIRS),$(shell find $(dir) -name "*.go"))

# Rules for directory list as input for the golangci-lint program
LINT_DIRS := $(DIRS) $(foreach dir,$(REC_DIRS),$(dir)/...)

.PHONY: all

all: clean fmt check build

# NOTE(review): 'go build -i' is deprecated in newer Go releases (the
# build cache made it redundant) — harmless on go1.14, but worth removing
# when the toolchain is upgraded.
build:
	$(GO) build -i $(GOFLAGS) -tags '$(TAGS)' -ldflags '$(LDFLAGS)' -o '$(BINDIR)/$(BINARIES)'

clean:
	@rm -rf $(BINDIR) _dist/

extra-clean: clean
	go clean -i $(PKG_GOX)
	go clean -i $(PKG_GOLANGCI_LINT)

# fmt will fix the golang source style in place.
fmt:
	@gofmt -s -l -w $(GO_SRC)

# check-fmt returns an error code if any source code contains format error.
check-fmt:
	@test -z $(shell gofmt -s -l $(GO_SRC) | tee /dev/stderr) || echo "[WARN] Fix formatting issues with 'make fmt'"

lint:
	@golangci-lint run $(LINT_DIRS)

check: check-fmt lint

# Check for required executables
HAS_GOX := $(shell command -v gox 2> /dev/null)
HAS_GOLANGCI := $(shell command -v golangci-lint 2> /dev/null)

# install-tools fetches gox and golangci-lint if they are not already on PATH.
# GO111MODULE is switched off so 'go get' installs them globally instead of
# touching this module's go.mod.
install-tools:
ifndef HAS_GOX
	(export GO111MODULE=off; go get -u $(PKG_GOX))
endif
ifndef HAS_GOLANGCI
	(export GO111MODULE=off; go get -u $(PKG_GOLANGCI_LINT))
endif
@ -0,0 +1,7 @@ |
|||||||
|
package run |
||||||
|
|
||||||
|
import "github.com/urfave/cli" |
||||||
|
|
||||||
|
func ImageSave(c *cli.Context) error { |
||||||
|
return imageSave(c.Args(), c.String("destination"), c.String("cluster")) |
||||||
|
} |
@ -0,0 +1,49 @@ |
|||||||
|
package run |
||||||
|
|
||||||
|
import ( |
||||||
|
"context" |
||||||
|
"fmt" |
||||||
|
"io" |
||||||
|
"log" |
||||||
|
"os" |
||||||
|
"strings" |
||||||
|
"time" |
||||||
|
|
||||||
|
"github.com/docker/docker/client" |
||||||
|
) |
||||||
|
|
||||||
|
func imageSave(images []string, dest, clusterName string) error { |
||||||
|
// get a docker client
|
||||||
|
ctx := context.Background() |
||||||
|
docker, err := client.NewEnvClient() |
||||||
|
if err != nil { |
||||||
|
return fmt.Errorf("ERROR: couldn't create docker client\n%+v", err) |
||||||
|
} |
||||||
|
|
||||||
|
imageReader, err := docker.ImageSave(ctx, images) |
||||||
|
if err != nil { |
||||||
|
return fmt.Errorf("ERROR: couldn't save images %s\n%+v", images, err) |
||||||
|
} |
||||||
|
defer imageReader.Close() |
||||||
|
|
||||||
|
tarFileName := dest |
||||||
|
if !strings.HasSuffix(dest, ".tar") { |
||||||
|
if !strings.HasSuffix(dest, "/") { |
||||||
|
dest = dest + "/" |
||||||
|
} |
||||||
|
tarFileName = fmt.Sprintf("%sk3d-%s-images-%s.tar", dest, clusterName, time.Now().Format("20060102150405")) |
||||||
|
} |
||||||
|
tarFile, err := os.Create(tarFileName) |
||||||
|
if err != nil { |
||||||
|
return fmt.Errorf("ERROR: couldn't create tarfile [%s]\n%+v", tarFileName, err) |
||||||
|
} |
||||||
|
defer tarFile.Close() |
||||||
|
|
||||||
|
if _, err = io.Copy(tarFile, imageReader); err != nil { |
||||||
|
return fmt.Errorf("ERROR: couldn't save image stream to tarfile\n%+v", err) |
||||||
|
} |
||||||
|
|
||||||
|
log.Printf("INFO: saved images %s to [%s]", images, tarFileName) |
||||||
|
|
||||||
|
return nil |
||||||
|
} |
@ -0,0 +1,16 @@ |
|||||||
|
module github.com/rancher/k3d/tools |
||||||
|
|
||||||
|
go 1.14 |
||||||
|
|
||||||
|
require ( |
||||||
|
github.com/Microsoft/go-winio v0.4.12 // indirect |
||||||
|
github.com/docker/distribution v2.7.1+incompatible // indirect |
||||||
|
github.com/docker/docker v1.13.1 |
||||||
|
github.com/docker/go-connections v0.4.0 // indirect |
||||||
|
github.com/docker/go-units v0.4.0 // indirect |
||||||
|
github.com/opencontainers/go-digest v1.0.0-rc1 // indirect |
||||||
|
github.com/pkg/errors v0.8.1 // indirect |
||||||
|
github.com/stretchr/testify v1.3.0 // indirect |
||||||
|
github.com/urfave/cli v1.20.0 |
||||||
|
golang.org/x/net v0.0.0-20190628185345-da137c7871d7 // indirect |
||||||
|
) |
@ -0,0 +1,29 @@ |
|||||||
|
github.com/Microsoft/go-winio v0.4.12 h1:xAfWHN1IrQ0NJ9TBC0KBZoqLjzDTr1ML+4MywiUOryc= |
||||||
|
github.com/Microsoft/go-winio v0.4.12/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= |
||||||
|
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= |
||||||
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= |
||||||
|
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= |
||||||
|
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= |
||||||
|
github.com/docker/docker v1.13.1 h1:IkZjBSIc8hBjLpqeAbeE5mca5mNgeatLHBy3GO78BWo= |
||||||
|
github.com/docker/docker v1.13.1/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= |
||||||
|
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= |
||||||
|
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= |
||||||
|
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= |
||||||
|
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= |
||||||
|
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= |
||||||
|
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= |
||||||
|
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= |
||||||
|
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= |
||||||
|
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= |
||||||
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= |
||||||
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= |
||||||
|
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= |
||||||
|
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= |
||||||
|
github.com/urfave/cli v1.20.0 h1:fDqGv3UG/4jbVl/QkFwEdddtEDjh/5Ov6X+0B/3bPaw= |
||||||
|
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= |
||||||
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= |
||||||
|
golang.org/x/net v0.0.0-20190628185345-da137c7871d7 h1:rTIdg5QFRR7XCaK4LCjBiPbx8j4DQRpdYMnGn/bJUEU= |
||||||
|
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= |
||||||
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= |
||||||
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= |
||||||
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= |
@ -0,0 +1,61 @@ |
|||||||
|
package main |
||||||
|
|
||||||
|
import ( |
||||||
|
"fmt" |
||||||
|
"log" |
||||||
|
"os" |
||||||
|
"time" |
||||||
|
|
||||||
|
run "github.com/rancher/k3d/tools/cmd" |
||||||
|
"github.com/rancher/k3d/tools/version" |
||||||
|
"github.com/urfave/cli" |
||||||
|
) |
||||||
|
|
||||||
|
// main represents the CLI application
|
||||||
|
func main() { |
||||||
|
|
||||||
|
// App Details
|
||||||
|
app := cli.NewApp() |
||||||
|
app.Name = "k3d-tools" |
||||||
|
app.Usage = "Tools to help running k3d successfully!" |
||||||
|
app.Version = version.GetVersion() |
||||||
|
|
||||||
|
// commands that you can execute
|
||||||
|
app.Commands = []cli.Command{ |
||||||
|
{ |
||||||
|
// save-image
|
||||||
|
Name: "save-image", |
||||||
|
Aliases: []string{"save"}, |
||||||
|
Usage: "Save images to tarball", |
||||||
|
Flags: []cli.Flag{ |
||||||
|
cli.StringFlag{ |
||||||
|
Name: "destination, dest, d", |
||||||
|
Value: "/images", |
||||||
|
Usage: "destination tar-file (optional)", |
||||||
|
}, |
||||||
|
cli.StringFlag{ |
||||||
|
Name: "cluster, c", |
||||||
|
Value: "k3s-default", |
||||||
|
Usage: "name of the k3d cluster", |
||||||
|
}, |
||||||
|
}, |
||||||
|
Action: run.ImageSave, |
||||||
|
}, |
||||||
|
{ |
||||||
|
Name: "noop", |
||||||
|
Usage: "Don't do anything and sleep forever", |
||||||
|
Action: func(c *cli.Context) { |
||||||
|
for { |
||||||
|
fmt.Println("Sleeping for 12h") |
||||||
|
time.Sleep(12 * time.Hour) |
||||||
|
} |
||||||
|
}, |
||||||
|
}, |
||||||
|
} |
||||||
|
|
||||||
|
// run the whole thing
|
||||||
|
err := app.Run(os.Args) |
||||||
|
if err != nil { |
||||||
|
log.Fatal(err) |
||||||
|
} |
||||||
|
} |
@ -0,0 +1 @@ |
|||||||
|
*.exe |
@ -0,0 +1,22 @@ |
|||||||
|
The MIT License (MIT) |
||||||
|
|
||||||
|
Copyright (c) 2015 Microsoft |
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy |
||||||
|
of this software and associated documentation files (the "Software"), to deal |
||||||
|
in the Software without restriction, including without limitation the rights |
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell |
||||||
|
copies of the Software, and to permit persons to whom the Software is |
||||||
|
furnished to do so, subject to the following conditions: |
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all |
||||||
|
copies or substantial portions of the Software. |
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
||||||
|
SOFTWARE. |
||||||
|
|
@ -0,0 +1,22 @@ |
|||||||
|
# go-winio |
||||||
|
|
||||||
|
This repository contains utilities for efficiently performing Win32 IO operations in |
||||||
|
Go. Currently, this is focused on accessing named pipes and other file handles, and |
||||||
|
for using named pipes as a net transport. |
||||||
|
|
||||||
|
This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go |
||||||
|
to reuse the thread to schedule another goroutine. This limits support to Windows Vista and |
||||||
|
newer operating systems. This is similar to the implementation of network sockets in Go's net |
||||||
|
package. |
||||||
|
|
||||||
|
Please see the LICENSE file for licensing information. |
||||||
|
|
||||||
|
This project has adopted the [Microsoft Open Source Code of |
||||||
|
Conduct](https://opensource.microsoft.com/codeofconduct/). For more information |
||||||
|
see the [Code of Conduct |
||||||
|
FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact |
||||||
|
[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional |
||||||
|
questions or comments. |
||||||
|
|
||||||
|
Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe |
||||||
|
for another named pipe implementation. |
@ -0,0 +1,280 @@ |
|||||||
|
// +build windows
|
||||||
|
|
||||||
|
package winio |
||||||
|
|
||||||
|
import ( |
||||||
|
"encoding/binary" |
||||||
|
"errors" |
||||||
|
"fmt" |
||||||
|
"io" |
||||||
|
"io/ioutil" |
||||||
|
"os" |
||||||
|
"runtime" |
||||||
|
"syscall" |
||||||
|
"unicode/utf16" |
||||||
|
) |
||||||
|
|
||||||
|
//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead
|
||||||
|
//sys backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite
|
||||||
|
|
||||||
|
// Backup stream IDs read from / written to the WIN32_STREAM_ID header
// (wsi.StreamId); names mirror the BACKUP_* constants in winbase.h.
const (
	BackupData = uint32(iota + 1)
	BackupEaData
	BackupSecurity
	BackupAlternateData
	BackupLink
	BackupPropertyData
	BackupObjectId
	BackupReparseData
	BackupSparseBlock
	BackupTxfsData
)

// Stream attribute flags carried in BackupHeader.Attributes.
const (
	StreamSparseAttributes = uint32(8)
)

// Additional access-mask bits that callers may pass to OpenForBackup
// alongside the generic read/write rights.
const (
	WRITE_DAC              = 0x40000
	WRITE_OWNER            = 0x80000
	ACCESS_SYSTEM_SECURITY = 0x1000000
)
||||||
|
|
||||||
|
// BackupHeader represents a backup stream of a file.
type BackupHeader struct {
	Id         uint32 // The backup stream ID
	Attributes uint32 // Stream attributes
	Size       int64  // The size of the stream in bytes
	Name       string // The name of the stream (for BackupAlternateData only).
	Offset     int64  // The offset of the stream in the file (for BackupSparseBlock only).
}

// win32StreamId is the fixed-size wire representation of WIN32_STREAM_ID
// as produced/consumed by BackupRead/BackupWrite; the variable-length
// UTF-16 stream name (NameSize bytes) follows it on the wire.
type win32StreamId struct {
	StreamId   uint32
	Attributes uint32
	Size       uint64
	NameSize   uint32 // length of the trailing name, in bytes
}
||||||
|
|
||||||
|
// BackupStreamReader reads from a stream produced by the BackupRead Win32 API and produces a series
|
||||||
|
// of BackupHeader values.
|
||||||
|
type BackupStreamReader struct { |
||||||
|
r io.Reader |
||||||
|
bytesLeft int64 |
||||||
|
} |
||||||
|
|
||||||
|
// NewBackupStreamReader produces a BackupStreamReader from any io.Reader.
|
||||||
|
func NewBackupStreamReader(r io.Reader) *BackupStreamReader { |
||||||
|
return &BackupStreamReader{r, 0} |
||||||
|
} |
||||||
|
|
||||||
|
// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if
// it was not completely read.
func (r *BackupStreamReader) Next() (*BackupHeader, error) {
	if r.bytesLeft > 0 {
		if s, ok := r.r.(io.Seeker); ok {
			// Make sure Seek on io.SeekCurrent sometimes succeeds
			// before trying the actual seek.
			if _, err := s.Seek(0, io.SeekCurrent); err == nil {
				if _, err = s.Seek(r.bytesLeft, io.SeekCurrent); err != nil {
					return nil, err
				}
				r.bytesLeft = 0
			}
		}
		// If seeking was not possible, drain the remainder by reading it.
		// Read() caps itself at bytesLeft, so this copy stops exactly at
		// the stream boundary (and is a no-op when the seek succeeded).
		if _, err := io.Copy(ioutil.Discard, r); err != nil {
			return nil, err
		}
	}
	// Read the fixed-size WIN32_STREAM_ID header that precedes every stream.
	var wsi win32StreamId
	if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil {
		return nil, err
	}
	hdr := &BackupHeader{
		Id:         wsi.StreamId,
		Attributes: wsi.Attributes,
		Size:       int64(wsi.Size),
	}
	// A non-zero NameSize means a UTF-16 stream name follows the header
	// (NameSize is in bytes, hence the /2 for the code-unit count).
	if wsi.NameSize != 0 {
		name := make([]uint16, int(wsi.NameSize/2))
		if err := binary.Read(r.r, binary.LittleEndian, name); err != nil {
			return nil, err
		}
		hdr.Name = syscall.UTF16ToString(name)
	}
	// Sparse blocks carry a leading 8-byte offset that is counted in
	// wsi.Size; surface it in the header and shrink Size accordingly.
	if wsi.StreamId == BackupSparseBlock {
		if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil {
			return nil, err
		}
		hdr.Size -= 8
	}
	r.bytesLeft = hdr.Size
	return hdr, nil
}
||||||
|
|
||||||
|
// Read reads from the current backup stream.
|
||||||
|
func (r *BackupStreamReader) Read(b []byte) (int, error) { |
||||||
|
if r.bytesLeft == 0 { |
||||||
|
return 0, io.EOF |
||||||
|
} |
||||||
|
if int64(len(b)) > r.bytesLeft { |
||||||
|
b = b[:r.bytesLeft] |
||||||
|
} |
||||||
|
n, err := r.r.Read(b) |
||||||
|
r.bytesLeft -= int64(n) |
||||||
|
if err == io.EOF { |
||||||
|
err = io.ErrUnexpectedEOF |
||||||
|
} else if r.bytesLeft == 0 && err == nil { |
||||||
|
err = io.EOF |
||||||
|
} |
||||||
|
return n, err |
||||||
|
} |
||||||
|
|
||||||
|
// BackupStreamWriter writes a stream compatible with the BackupWrite Win32 API.
|
||||||
|
type BackupStreamWriter struct { |
||||||
|
w io.Writer |
||||||
|
bytesLeft int64 |
||||||
|
} |
||||||
|
|
||||||
|
// NewBackupStreamWriter produces a BackupStreamWriter on top of an io.Writer.
|
||||||
|
func NewBackupStreamWriter(w io.Writer) *BackupStreamWriter { |
||||||
|
return &BackupStreamWriter{w, 0} |
||||||
|
} |
||||||
|
|
||||||
|
// WriteHeader writes the next backup stream header and prepares for calls to Write().
func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error {
	// The previous stream must be fully written before a new header starts.
	if w.bytesLeft != 0 {
		return fmt.Errorf("missing %d bytes", w.bytesLeft)
	}
	name := utf16.Encode([]rune(hdr.Name))
	wsi := win32StreamId{
		StreamId:   hdr.Id,
		Attributes: hdr.Attributes,
		Size:       uint64(hdr.Size),
		NameSize:   uint32(len(name) * 2), // NameSize is in bytes; name is UTF-16
	}
	if hdr.Id == BackupSparseBlock {
		// Include space for the int64 block offset
		wsi.Size += 8
	}
	// Wire order: fixed header, then the optional UTF-16 name, then (for
	// sparse blocks) the offset — mirroring what Next() reads.
	if err := binary.Write(w.w, binary.LittleEndian, &wsi); err != nil {
		return err
	}
	if len(name) != 0 {
		if err := binary.Write(w.w, binary.LittleEndian, name); err != nil {
			return err
		}
	}
	if hdr.Id == BackupSparseBlock {
		if err := binary.Write(w.w, binary.LittleEndian, hdr.Offset); err != nil {
			return err
		}
	}
	w.bytesLeft = hdr.Size
	return nil
}
||||||
|
|
||||||
|
// Write writes to the current backup stream.
|
||||||
|
func (w *BackupStreamWriter) Write(b []byte) (int, error) { |
||||||
|
if w.bytesLeft < int64(len(b)) { |
||||||
|
return 0, fmt.Errorf("too many bytes by %d", int64(len(b))-w.bytesLeft) |
||||||
|
} |
||||||
|
n, err := w.w.Write(b) |
||||||
|
w.bytesLeft -= int64(n) |
||||||
|
return n, err |
||||||
|
} |
||||||
|
|
||||||
|
// BackupFileReader provides an io.ReadCloser interface on top of the BackupRead Win32 API.
type BackupFileReader struct {
	f               *os.File // file being backed up; not owned — never closed here
	includeSecurity bool     // whether BackupRead should emit the security descriptor
	ctx             uintptr  // opaque BackupRead continuation context
}

// NewBackupFileReader returns a new BackupFileReader from a file handle. If includeSecurity is true,
// Read will attempt to read the security descriptor of the file.
func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader {
	r := &BackupFileReader{f, includeSecurity, 0}
	return r
}

// Read reads a backup stream from the file by calling the Win32 API BackupRead().
func (r *BackupFileReader) Read(b []byte) (int, error) {
	var bytesRead uint32
	err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx)
	if err != nil {
		return 0, &os.PathError{"BackupRead", r.f.Name(), err}
	}
	// Keep the *os.File (and hence its handle) alive across the raw syscall.
	runtime.KeepAlive(r.f)
	if bytesRead == 0 {
		return 0, io.EOF
	}
	return int(bytesRead), nil
}

// Close frees Win32 resources associated with the BackupFileReader. It does not close
// the underlying file.
func (r *BackupFileReader) Close() error {
	if r.ctx != 0 {
		// abort=true tells BackupRead to release its context; any error
		// from this cleanup call is intentionally ignored.
		backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx)
		runtime.KeepAlive(r.f)
		r.ctx = 0
	}
	return nil
}
||||||
|
|
||||||
|
// BackupFileWriter provides an io.WriteCloser interface on top of the BackupWrite Win32 API.
type BackupFileWriter struct {
	f               *os.File // file being restored; not owned — never closed here
	includeSecurity bool     // whether BackupWrite should restore the security descriptor
	ctx             uintptr  // opaque BackupWrite continuation context
}

// NewBackupFileWriter returns a new BackupFileWriter from a file handle. If includeSecurity is true,
// Write() will attempt to restore the security descriptor from the stream.
func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter {
	w := &BackupFileWriter{f, includeSecurity, 0}
	return w
}

// Write restores a portion of the file using the provided backup stream.
func (w *BackupFileWriter) Write(b []byte) (int, error) {
	var bytesWritten uint32
	err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx)
	if err != nil {
		return 0, &os.PathError{"BackupWrite", w.f.Name(), err}
	}
	// Keep the *os.File (and hence its handle) alive across the raw syscall.
	runtime.KeepAlive(w.f)
	if int(bytesWritten) != len(b) {
		// A short write is an error: the restore stream would be left at
		// an inconsistent position.
		return int(bytesWritten), errors.New("not all bytes could be written")
	}
	return len(b), nil
}

// Close frees Win32 resources associated with the BackupFileWriter. It does not
// close the underlying file.
func (w *BackupFileWriter) Close() error {
	if w.ctx != 0 {
		// abort=true releases the BackupWrite context; cleanup errors are
		// intentionally ignored.
		backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx)
		runtime.KeepAlive(w.f)
		w.ctx = 0
	}
	return nil
}
||||||
|
|
||||||
|
// OpenForBackup opens a file or directory, potentially skipping access checks if the backup
// or restore privileges have been acquired.
//
// If the file opened was a directory, it cannot be used with Readdir().
func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) {
	winPath, err := syscall.UTF16FromString(path)
	if err != nil {
		return nil, err
	}
	// FILE_FLAG_BACKUP_SEMANTICS permits opening directories (and enables
	// the backup/restore privilege behavior); FILE_FLAG_OPEN_REPARSE_POINT
	// opens a reparse point itself rather than the target it points to.
	h, err := syscall.CreateFile(&winPath[0], access, share, nil, createmode, syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, 0)
	if err != nil {
		err = &os.PathError{Op: "open", Path: path, Err: err}
		return nil, err
	}
	return os.NewFile(uintptr(h), path), nil
}
@ -0,0 +1,137 @@ |
|||||||
|
package winio |
||||||
|
|
||||||
|
import ( |
||||||
|
"bytes" |
||||||
|
"encoding/binary" |
||||||
|
"errors" |
||||||
|
) |
||||||
|
|
||||||
|
// fileFullEaInformation mirrors the fixed-size prefix of the Windows
// FILE_FULL_EA_INFORMATION structure; the entry's name (NUL-terminated)
// and value bytes follow it in the buffer.
type fileFullEaInformation struct {
	NextEntryOffset uint32
	Flags           uint8
	NameLength      uint8
	ValueLength     uint16
}

var (
	fileFullEaInformationSize = binary.Size(&fileFullEaInformation{})

	errInvalidEaBuffer = errors.New("invalid extended attribute buffer")
	errEaNameTooLarge  = errors.New("extended attribute name too large")
	errEaValueTooLarge = errors.New("extended attribute value too large")
)

// ExtendedAttribute represents a single Windows EA.
type ExtendedAttribute struct {
	Name  string
	Value []byte
	Flags uint8
}

// parseEa decodes one EA entry from the front of b, returning the entry,
// the remaining buffer (nil when this was the last entry), and an error
// if the buffer is malformed.
func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
	var hdr fileFullEaInformation
	if err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &hdr); err != nil {
		err = errInvalidEaBuffer
		return
	}

	// Compute the spans of the name and value within b; the NUL byte
	// terminating the name sits between them.
	nameStart := fileFullEaInformationSize
	nameEnd := nameStart + int(hdr.NameLength)
	valueStart := nameEnd + 1
	valueEnd := valueStart + int(hdr.ValueLength)
	next := int(hdr.NextEntryOffset)
	if valueEnd > len(b) || next < 0 || next > len(b) {
		err = errInvalidEaBuffer
		return
	}

	ea = ExtendedAttribute{
		Name:  string(b[nameStart:nameEnd]),
		Value: b[valueStart:valueEnd],
		Flags: hdr.Flags,
	}
	// A zero NextEntryOffset terminates the list; otherwise it points at
	// the next entry relative to the start of b.
	if next != 0 {
		nb = b[next:]
	}
	return
}

// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION
// buffer retrieved from BackupRead, ZwQueryEaFile, etc.
func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) {
	for len(b) != 0 {
		var ea ExtendedAttribute
		ea, b, err = parseEa(b)
		if err != nil {
			return nil, err
		}
		eas = append(eas, ea)
	}
	return
}

// writeEa serializes one EA entry into buf. Non-final entries have their
// NextEntryOffset set to the 4-byte-aligned size of the entry; the final
// entry uses 0 as the list terminator.
func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error {
	nameLen := len(ea.Name)
	valueLen := len(ea.Value)
	if int(uint8(nameLen)) != nameLen {
		return errEaNameTooLarge
	}
	if int(uint16(valueLen)) != valueLen {
		return errEaValueTooLarge
	}

	// Header + name + NUL terminator + value, padded to a 4-byte boundary.
	rawSize := uint32(fileFullEaInformationSize + nameLen + 1 + valueLen)
	paddedSize := (rawSize + 3) &^ 3
	var next uint32
	if !last {
		next = paddedSize
	}

	hdr := fileFullEaInformation{
		NextEntryOffset: next,
		Flags:           ea.Flags,
		NameLength:      uint8(nameLen),
		ValueLength:     uint16(valueLen),
	}
	if err := binary.Write(buf, binary.LittleEndian, &hdr); err != nil {
		return err
	}
	if _, err := buf.WriteString(ea.Name); err != nil {
		return err
	}
	if err := buf.WriteByte(0); err != nil {
		return err
	}
	if _, err := buf.Write(ea.Value); err != nil {
		return err
	}
	// Zero padding up to the aligned size (at most 3 bytes).
	if _, err := buf.Write(make([]byte, paddedSize-rawSize)); err != nil {
		return err
	}
	return nil
}

// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION
// buffer for use with BackupWrite, ZwSetEaFile, etc.
func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) {
	var buf bytes.Buffer
	for i := range eas {
		if err := writeEa(&buf, &eas[i], i == len(eas)-1); err != nil {
			return nil, err
		}
	}
	return buf.Bytes(), nil
}
@ -0,0 +1,307 @@ |
|||||||
|
// +build windows
|
||||||
|
|
||||||
|
package winio |
||||||
|
|
||||||
|
import ( |
||||||
|
"errors" |
||||||
|
"io" |
||||||
|
"runtime" |
||||||
|
"sync" |
||||||
|
"sync/atomic" |
||||||
|
"syscall" |
||||||
|
"time" |
||||||
|
) |
||||||
|
|
||||||
|
//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx
|
||||||
|
//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort
|
||||||
|
//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus
|
||||||
|
//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes
|
||||||
|
|
||||||
|
// atomicBool is a boolean that can be read and written atomically,
// represented as an int32 (0 = false, non-zero = true).
type atomicBool int32

// isSet reports whether the flag is currently true.
func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 }

// setFalse atomically clears the flag.
func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) }

// setTrue atomically sets the flag.
func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) }

// swap atomically stores v and reports whether the flag was previously set.
func (b *atomicBool) swap(v bool) bool {
	var repr int32
	if v {
		repr = 1
	}
	return atomic.SwapInt32((*int32)(b), repr) == 1
}
||||||
|
|
||||||
|
// Flags for SetFileCompletionNotificationModes: skip posting to the
// completion port when an operation completes synchronously, and skip
// setting the file handle's event on completion.
const (
	cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1
	cFILE_SKIP_SET_EVENT_ON_HANDLE = 2
)
||||||
|
|
||||||
|
var (
	// ErrFileClosed is returned when an operation is attempted on a file
	// whose handle has already been released.
	ErrFileClosed = errors.New("file has already been closed")
	// ErrTimeout is returned when an IO operation exceeds its deadline.
	ErrTimeout = &timeoutError{}
)

// timeoutError is the concrete type behind ErrTimeout; its Timeout and
// Temporary methods let callers detect deadline expiry generically
// (net.Error style).
type timeoutError struct{}

func (e *timeoutError) Error() string { return "i/o timeout" }
func (e *timeoutError) Timeout() bool { return true }
func (e *timeoutError) Temporary() bool { return true }

// timeoutChan signals deadline expiry by being closed.
type timeoutChan chan struct{}
||||||
|
|
||||||
|
// ioInitOnce guards the lazy, one-time creation of the shared completion port.
var ioInitOnce sync.Once

// ioCompletionPort is the single IO completion port shared by all win32Files.
var ioCompletionPort syscall.Handle

// ioResult contains the result of an asynchronous IO operation
type ioResult struct {
	bytes uint32 // number of bytes transferred
	err   error  // Win32 error, if any
}

// ioOperation represents an outstanding asynchronous Win32 IO
type ioOperation struct {
	// o must stay the first field: the completion port returns the
	// OVERLAPPED pointer, which is reinterpreted as **ioOperation by
	// getQueuedCompletionStatus.
	o  syscall.Overlapped
	ch chan ioResult // receives the completion result
}

// initIo creates the process-wide completion port and starts the goroutine
// that dispatches completions; it must run exactly once (see ioInitOnce).
func initIo() {
	h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff)
	if err != nil {
		// Without a completion port no async IO can work at all, so this
		// is treated as unrecoverable.
		panic(err)
	}
	ioCompletionPort = h
	go ioCompletionProcessor(h)
}
||||||
|
|
||||||
|
// win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall.
// It takes ownership of this handle and will close it if it is garbage collected.
type win32File struct {
	handle        syscall.Handle
	wg            sync.WaitGroup // counts in-flight IO operations (see prepareIo)
	wgLock        sync.RWMutex   // orders closing against starting new IO
	closing       atomicBool     // set once closeHandle has begun tearing down
	readDeadline  deadlineHandler
	writeDeadline deadlineHandler
}

// deadlineHandler implements a read or write deadline for a win32File.
type deadlineHandler struct {
	setLock     sync.Mutex   // serializes deadline updates
	channel     timeoutChan  // closed when the deadline expires
	channelLock sync.RWMutex // guards replacement of channel
	timer       *time.Timer  // pending expiry timer, if any
	timedout    atomicBool   // set once the deadline has fired
}
||||||
|
|
||||||
|
// makeWin32File makes a new win32File from an existing file handle
// (which must have been opened for overlapped IO — not verifiable here;
// confirm at call sites). It associates the handle with the shared completion
// port and enables skip-on-success notification so synchronous completions
// are not also queued to the port.
func makeWin32File(h syscall.Handle) (*win32File, error) {
	f := &win32File{handle: h}
	ioInitOnce.Do(initIo)
	_, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff)
	if err != nil {
		return nil, err
	}
	err = setFileCompletionNotificationModes(h, cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS|cFILE_SKIP_SET_EVENT_ON_HANDLE)
	if err != nil {
		return nil, err
	}
	f.readDeadline.channel = make(timeoutChan)
	f.writeDeadline.channel = make(timeoutChan)
	return f, nil
}
||||||
|
|
||||||
|
// MakeOpenFile wraps an existing Win32 handle in an io.ReadWriteCloser backed
// by the shared completion port. Ownership of the handle transfers to the
// returned value.
func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) {
	return makeWin32File(h)
}
||||||
|
|
||||||
|
// closeHandle closes the resources associated with a Win32 handle
// It is idempotent: only the first caller (decided by closing.swap) cancels
// IO, waits for it to drain, and closes the handle.
func (f *win32File) closeHandle() {
	f.wgLock.Lock()
	// Atomically set that we are closing, releasing the resources only once.
	if !f.closing.swap(true) {
		f.wgLock.Unlock()
		// cancel all IO and wait for it to complete
		cancelIoEx(f.handle, nil)
		f.wg.Wait()
		// at this point, no new IO can start
		syscall.Close(f.handle)
		f.handle = 0
	} else {
		f.wgLock.Unlock()
	}
}
||||||
|
|
||||||
|
// Close closes a win32File.
// It always returns nil; errors from the underlying handle close are ignored.
func (f *win32File) Close() error {
	f.closeHandle()
	return nil
}
||||||
|
|
||||||
|
// prepareIo prepares for a new IO operation.
// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning.
// It fails with ErrFileClosed if the file is already closing; the RLock
// pairs with the write lock in closeHandle so that wg.Add never races a
// Close that has already begun.
func (f *win32File) prepareIo() (*ioOperation, error) {
	f.wgLock.RLock()
	if f.closing.isSet() {
		f.wgLock.RUnlock()
		return nil, ErrFileClosed
	}
	f.wg.Add(1)
	f.wgLock.RUnlock()
	c := &ioOperation{}
	c.ch = make(chan ioResult)
	return c, nil
}
||||||
|
|
||||||
|
// ioCompletionProcessor processes completed async IOs forever
// It runs as a single goroutine started by initIo, dequeuing completions
// from the shared port and forwarding each to its operation's channel.
func ioCompletionProcessor(h syscall.Handle) {
	for {
		var bytes uint32
		var key uintptr
		var op *ioOperation
		err := getQueuedCompletionStatus(h, &bytes, &key, &op, syscall.INFINITE)
		// A nil op means the dequeue itself failed (no overlapped pointer was
		// returned); this should never happen, so treat it as fatal.
		if op == nil {
			panic(err)
		}
		op.ch <- ioResult{bytes, err}
	}
}
||||||
|
|
||||||
|
// asyncIo processes the return value from ReadFile or WriteFile, blocking until
// the operation has actually completed.
// A non-pending error (including nil) means the operation finished
// synchronously, thanks to FILE_SKIP_COMPLETION_PORT_ON_SUCCESS, and is
// returned immediately. Otherwise this waits for the completion to arrive via
// ioCompletionProcessor, or for the deadline channel (if d is non-nil) to
// fire, in which case the IO is cancelled and the abort mapped to ErrTimeout.
func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) {
	if err != syscall.ERROR_IO_PENDING {
		return int(bytes), err
	}

	// If a close raced in after the IO was issued, cancel it so the wait
	// below cannot block Close indefinitely.
	if f.closing.isSet() {
		cancelIoEx(f.handle, &c.o)
	}

	var timeout timeoutChan
	if d != nil {
		d.channelLock.Lock()
		timeout = d.channel
		d.channelLock.Unlock()
	}

	var r ioResult
	select {
	case r = <-c.ch:
		err = r.err
		if err == syscall.ERROR_OPERATION_ABORTED {
			if f.closing.isSet() {
				err = ErrFileClosed
			}
		}
	case <-timeout:
		// Deadline fired first: cancel and then wait for the (mandatory)
		// completion so the kernel is done touching c before we return.
		cancelIoEx(f.handle, &c.o)
		r = <-c.ch
		err = r.err
		if err == syscall.ERROR_OPERATION_ABORTED {
			err = ErrTimeout
		}
	}

	// runtime.KeepAlive is needed, as c is passed via native
	// code to ioCompletionProcessor, c must remain alive
	// until the channel read is complete.
	runtime.KeepAlive(c)
	return int(r.bytes), err
}
||||||
|
|
||||||
|
// Read reads from a file handle.
|
||||||
|
func (f *win32File) Read(b []byte) (int, error) { |
||||||
|
c, err := f.prepareIo() |
||||||
|
if err != nil { |
||||||
|
return 0, err |
||||||
|
} |
||||||
|
defer f.wg.Done() |
||||||
|
|
||||||
|
if f.readDeadline.timedout.isSet() { |
||||||
|
return 0, ErrTimeout |
||||||
|
} |
||||||
|
|
||||||
|
var bytes uint32 |
||||||
|
err = syscall.ReadFile(f.handle, b, &bytes, &c.o) |
||||||
|
n, err := f.asyncIo(c, &f.readDeadline, bytes, err) |
||||||
|
runtime.KeepAlive(b) |
||||||
|
|
||||||
|
// Handle EOF conditions.
|
||||||
|
if err == nil && n == 0 && len(b) != 0 { |
||||||
|
return 0, io.EOF |
||||||
|
} else if err == syscall.ERROR_BROKEN_PIPE { |
||||||
|
return 0, io.EOF |
||||||
|
} else { |
||||||
|
return n, err |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// Write writes to a file handle.
// An already-expired write deadline fails immediately with ErrTimeout.
func (f *win32File) Write(b []byte) (int, error) {
	c, err := f.prepareIo()
	if err != nil {
		return 0, err
	}
	defer f.wg.Done()

	if f.writeDeadline.timedout.isSet() {
		return 0, ErrTimeout
	}

	var bytes uint32
	err = syscall.WriteFile(f.handle, b, &bytes, &c.o)
	n, err := f.asyncIo(c, &f.writeDeadline, bytes, err)
	// b is referenced by the kernel until the IO completes; keep it alive.
	runtime.KeepAlive(b)
	return n, err
}
||||||
|
|
||||||
|
// SetReadDeadline arms (or, for a zero time, clears) the deadline applied to
// subsequent Read calls.
func (f *win32File) SetReadDeadline(deadline time.Time) error {
	return f.readDeadline.set(deadline)
}

// SetWriteDeadline arms (or, for a zero time, clears) the deadline applied to
// subsequent Write calls.
func (f *win32File) SetWriteDeadline(deadline time.Time) error {
	return f.writeDeadline.set(deadline)
}

// Flush flushes any buffered data for the handle to the underlying device.
func (f *win32File) Flush() error {
	return syscall.FlushFileBuffers(f.handle)
}
||||||
|
|
||||||
|
// set installs a new deadline, replacing any previous one. A zero deadline
// disarms the handler; a past deadline fires immediately. The statement order
// here is delicate: the old timer must be stopped (and its close drained)
// and a fired channel replaced before the timedout flag is cleared and a new
// timer armed.
func (d *deadlineHandler) set(deadline time.Time) error {
	d.setLock.Lock()
	defer d.setLock.Unlock()

	if d.timer != nil {
		if !d.timer.Stop() {
			// The timer already fired; drain the close so the select below
			// sees a consistent channel state.
			<-d.channel
		}
		d.timer = nil
	}
	d.timedout.setFalse()

	// If the previous deadline fired, its channel is closed; replace it with
	// a fresh one so future waiters block again.
	select {
	case <-d.channel:
		d.channelLock.Lock()
		d.channel = make(chan struct{})
		d.channelLock.Unlock()
	default:
	}

	if deadline.IsZero() {
		return nil
	}

	// timeoutIO latches the timeout flag and wakes all waiters.
	timeoutIO := func() {
		d.timedout.setTrue()
		close(d.channel)
	}

	now := time.Now()
	duration := deadline.Sub(now)
	if deadline.After(now) {
		// Deadline is in the future, set a timer to wait
		d.timer = time.AfterFunc(duration, timeoutIO)
	} else {
		// Deadline is in the past. Cancel all pending IO now.
		timeoutIO()
	}
	return nil
}
@ -0,0 +1,61 @@ |
|||||||
|
// +build windows
|
||||||
|
|
||||||
|
package winio |
||||||
|
|
||||||
|
import ( |
||||||
|
"os" |
||||||
|
"runtime" |
||||||
|
"syscall" |
||||||
|
"unsafe" |
||||||
|
) |
||||||
|
|
||||||
|
//sys getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = GetFileInformationByHandleEx
|
||||||
|
//sys setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = SetFileInformationByHandle
|
||||||
|
|
||||||
|
// FILE_INFO_BY_HANDLE_CLASS values accepted by
// Get/SetFileInformationByHandleEx.
const (
	fileBasicInfo = 0    // FileBasicInfo class
	fileIDInfo    = 0x12 // FileIdInfo class
)

// FileBasicInfo contains file access time and file attributes information.
// Its layout mirrors the Win32 FILE_BASIC_INFO structure, so it is passed
// directly to the API via an unsafe.Pointer cast.
type FileBasicInfo struct {
	CreationTime, LastAccessTime, LastWriteTime, ChangeTime syscall.Filetime
	FileAttributes                                          uint32
	pad                                                     uint32 // padding
}
||||||
|
|
||||||
|
// GetFileBasicInfo retrieves times and attributes for a file.
// Errors are wrapped in *os.PathError carrying the file's name.
func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
	bi := &FileBasicInfo{}
	if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
		return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
	}
	// Keep f (and therefore its handle) alive across the raw syscall above.
	runtime.KeepAlive(f)
	return bi, nil
}
||||||
|
|
||||||
|
// SetFileBasicInfo sets times and attributes for a file.
// Errors are wrapped in *os.PathError carrying the file's name.
func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error {
	if err := setFileInformationByHandle(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
		return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err}
	}
	// Keep f (and therefore its handle) alive across the raw syscall above.
	runtime.KeepAlive(f)
	return nil
}
||||||
|
|
||||||
|
// FileIDInfo contains the volume serial number and file ID for a file. This pair should be
// unique on a system. Its layout mirrors the Win32 FILE_ID_INFO structure.
type FileIDInfo struct {
	VolumeSerialNumber uint64
	FileID             [16]byte
}

// GetFileID retrieves the unique (volume, file ID) pair for a file.
func GetFileID(f *os.File) (*FileIDInfo, error) {
	fileID := &FileIDInfo{}
	if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileIDInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil {
		return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
	}
	// Keep f (and therefore its handle) alive across the raw syscall above.
	runtime.KeepAlive(f)
	return fileID, nil
}
@ -0,0 +1,421 @@ |
|||||||
|
// +build windows
|
||||||
|
|
||||||
|
package winio |
||||||
|
|
||||||
|
import ( |
||||||
|
"errors" |
||||||
|
"io" |
||||||
|
"net" |
||||||
|
"os" |
||||||
|
"syscall" |
||||||
|
"time" |
||||||
|
"unsafe" |
||||||
|
) |
||||||
|
|
||||||
|
//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe
|
||||||
|
//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW
|
||||||
|
//sys createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW
|
||||||
|
//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo
|
||||||
|
//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
|
||||||
|
//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc
|
||||||
|
|
||||||
|
// Win32 named-pipe constants not exposed by package syscall.
const (
	// Win32 error codes surfaced by pipe operations.
	cERROR_PIPE_BUSY      = syscall.Errno(231)
	cERROR_NO_DATA        = syscall.Errno(232)
	cERROR_PIPE_CONNECTED = syscall.Errno(535)
	cERROR_SEM_TIMEOUT    = syscall.Errno(121)

	// CreateNamedPipe open-mode and CreateFile flags.
	cPIPE_ACCESS_DUPLEX            = 0x3
	cFILE_FLAG_FIRST_PIPE_INSTANCE = 0x80000
	cSECURITY_SQOS_PRESENT         = 0x100000
	cSECURITY_ANONYMOUS            = 0

	cPIPE_REJECT_REMOTE_CLIENTS = 0x8

	cPIPE_UNLIMITED_INSTANCES = 255

	// WaitNamedPipe timeout values (unused here; kept for completeness).
	cNMPWAIT_USE_DEFAULT_WAIT = 0
	cNMPWAIT_NOWAIT           = 1

	// Pipe type / read-mode flags.
	cPIPE_TYPE_MESSAGE = 4

	cPIPE_READMODE_MESSAGE = 2
)
||||||
|
|
||||||
|
var (
	// ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed.
	// This error should match net.errClosing since docker takes a dependency on its text.
	ErrPipeListenerClosed = errors.New("use of closed network connection")

	// errPipeWriteClosed is returned for writes after CloseWrite.
	errPipeWriteClosed = errors.New("pipe has been closed for write")
)

// win32Pipe is a byte-mode named-pipe connection; it embeds win32File for IO
// and records the pipe path for LocalAddr/RemoteAddr.
type win32Pipe struct {
	*win32File
	path string
}

// win32MessageBytePipe presents a message-mode pipe as a byte stream, adding
// CloseWrite/EOF semantics built on zero-byte messages.
type win32MessageBytePipe struct {
	win32Pipe
	writeClosed bool // set once CloseWrite has succeeded
	readEOF     bool // latched after a zero-byte message is read
}
||||||
|
|
||||||
|
// pipeAddress adapts a pipe path to the net.Addr interface.
type pipeAddress string

// LocalAddr returns the pipe path as the local address.
func (f *win32Pipe) LocalAddr() net.Addr {
	return pipeAddress(f.path)
}

// RemoteAddr returns the pipe path as the remote address (pipes have no
// distinct peer address).
func (f *win32Pipe) RemoteAddr() net.Addr {
	return pipeAddress(f.path)
}

// SetDeadline sets both the read and write deadlines, per net.Conn.
// Errors from the individual setters are ignored; both always return nil in
// this implementation.
func (f *win32Pipe) SetDeadline(t time.Time) error {
	f.SetReadDeadline(t)
	f.SetWriteDeadline(t)
	return nil
}
||||||
|
|
||||||
|
// CloseWrite closes the write side of a message pipe in byte mode.
// It flushes pending data and then sends a zero-byte message, which the
// reader side interprets as EOF. Subsequent writes fail with
// errPipeWriteClosed.
func (f *win32MessageBytePipe) CloseWrite() error {
	if f.writeClosed {
		return errPipeWriteClosed
	}
	err := f.win32File.Flush()
	if err != nil {
		return err
	}
	// The zero-byte write is the EOF marker; note this bypasses
	// win32MessageBytePipe.Write, which ignores empty writes.
	_, err = f.win32File.Write(nil)
	if err != nil {
		return err
	}
	f.writeClosed = true
	return nil
}
||||||
|
|
||||||
|
// Write writes bytes to a message pipe in byte mode. Zero-byte writes are ignored, since
// they are used to implement CloseWrite().
// Note the order of checks matters: a closed pipe reports errPipeWriteClosed
// even for an empty write.
func (f *win32MessageBytePipe) Write(b []byte) (int, error) {
	if f.writeClosed {
		return 0, errPipeWriteClosed
	}
	if len(b) == 0 {
		return 0, nil
	}
	return f.win32File.Write(b)
}
||||||
|
|
||||||
|
// Read reads bytes from a message pipe in byte mode. A read of a zero-byte message on a message
// mode pipe will return io.EOF, as will all subsequent reads.
func (f *win32MessageBytePipe) Read(b []byte) (int, error) {
	if f.readEOF {
		return 0, io.EOF
	}
	n, err := f.win32File.Read(b)
	if err == io.EOF {
		// If this was the result of a zero-byte read, then
		// it is possible that the read was due to a zero-size
		// message. Since we are simulating CloseWrite with a
		// zero-byte message, ensure that all future Read() calls
		// also return EOF.
		f.readEOF = true
	} else if err == syscall.ERROR_MORE_DATA {
		// ERROR_MORE_DATA indicates that the pipe's read mode is message mode
		// and the message still has more bytes. Treat this as a success, since
		// this package presents all named pipes as byte streams.
		err = nil
	}
	return n, err
}
||||||
|
|
||||||
|
// Network returns the address's network name, always "pipe".
func (s pipeAddress) Network() string {
	return "pipe"
}

// String returns the pipe path.
func (s pipeAddress) String() string {
	return string(s)
}
||||||
|
|
||||||
|
// DialPipe connects to a named pipe by path, timing out if the connection
// takes longer than the specified duration. If timeout is nil, then we use
// a default timeout of 2 seconds. (We do not use WaitNamedPipe.)
func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
	var absTimeout time.Time
	if timeout != nil {
		absTimeout = time.Now().Add(*timeout)
	} else {
		absTimeout = time.Now().Add(time.Second * 2)
	}
	var err error
	var h syscall.Handle
	// Poll until the server has a free instance or the deadline passes.
	for {
		h, err = createFile(path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
		if err != cERROR_PIPE_BUSY {
			break
		}
		if time.Now().After(absTimeout) {
			return nil, ErrTimeout
		}

		// Wait 10 msec and try again. This is a rather simplistic
		// view, as we always try each 10 milliseconds.
		time.Sleep(time.Millisecond * 10)
	}
	if err != nil {
		return nil, &os.PathError{Op: "open", Path: path, Err: err}
	}

	// Determine whether the server created the pipe in message mode.
	var flags uint32
	err = getNamedPipeInfo(h, &flags, nil, nil, nil)
	if err != nil {
		return nil, err
	}

	f, err := makeWin32File(h)
	if err != nil {
		syscall.Close(h)
		return nil, err
	}

	// If the pipe is in message mode, return a message byte pipe, which
	// supports CloseWrite().
	if flags&cPIPE_TYPE_MESSAGE != 0 {
		return &win32MessageBytePipe{
			win32Pipe: win32Pipe{win32File: f, path: path},
		}, nil
	}
	return &win32Pipe{win32File: f, path: path}, nil
}
||||||
|
|
||||||
|
// acceptResponse carries the result of one accept attempt from
// listenerRoutine back to Accept.
type acceptResponse struct {
	f   *win32File
	err error
}

// win32PipeListener implements net.Listener for a named pipe path. All
// accept/close work is funneled through listenerRoutine via the channels.
type win32PipeListener struct {
	// firstHandle holds the first pipe instance open so clients observe
	// ERROR_PIPE_BUSY rather than ERROR_FILE_NOT_FOUND (see ListenPipe).
	firstHandle        syscall.Handle
	path               string
	securityDescriptor []byte
	config             PipeConfig
	acceptCh           chan (chan acceptResponse) // Accept requests in
	closeCh            chan int                   // Close signal in
	doneCh             chan int                   // closed when the routine exits
}
||||||
|
|
||||||
|
func makeServerPipeHandle(path string, securityDescriptor []byte, c *PipeConfig, first bool) (syscall.Handle, error) { |
||||||
|
var flags uint32 = cPIPE_ACCESS_DUPLEX | syscall.FILE_FLAG_OVERLAPPED |
||||||
|
if first { |
||||||
|
flags |= cFILE_FLAG_FIRST_PIPE_INSTANCE |
||||||
|
} |
||||||
|
|
||||||
|
var mode uint32 = cPIPE_REJECT_REMOTE_CLIENTS |
||||||
|
if c.MessageMode { |
||||||
|
mode |= cPIPE_TYPE_MESSAGE |
||||||
|
} |
||||||
|
|
||||||
|
sa := &syscall.SecurityAttributes{} |
||||||
|
sa.Length = uint32(unsafe.Sizeof(*sa)) |
||||||
|
if securityDescriptor != nil { |
||||||
|
len := uint32(len(securityDescriptor)) |
||||||
|
sa.SecurityDescriptor = localAlloc(0, len) |
||||||
|
defer localFree(sa.SecurityDescriptor) |
||||||
|
copy((*[0xffff]byte)(unsafe.Pointer(sa.SecurityDescriptor))[:], securityDescriptor) |
||||||
|
} |
||||||
|
h, err := createNamedPipe(path, flags, mode, cPIPE_UNLIMITED_INSTANCES, uint32(c.OutputBufferSize), uint32(c.InputBufferSize), 0, sa) |
||||||
|
if err != nil { |
||||||
|
return 0, &os.PathError{Op: "open", Path: path, Err: err} |
||||||
|
} |
||||||
|
return h, nil |
||||||
|
} |
||||||
|
|
||||||
|
// makeServerPipe creates an additional (non-first) server instance of the
// listener's pipe and wraps it in a win32File. The raw handle is closed if
// wrapping fails.
func (l *win32PipeListener) makeServerPipe() (*win32File, error) {
	h, err := makeServerPipeHandle(l.path, l.securityDescriptor, &l.config, false)
	if err != nil {
		return nil, err
	}
	f, err := makeWin32File(h)
	if err != nil {
		syscall.Close(h)
		return nil, err
	}
	return f, nil
}
||||||
|
|
||||||
|
// makeConnectedServerPipe creates a server pipe instance and blocks until a
// client connects or the listener is closed. On listener close the pending
// connect is aborted (by closing the pipe) and ErrPipeListenerClosed is
// returned.
func (l *win32PipeListener) makeConnectedServerPipe() (*win32File, error) {
	p, err := l.makeServerPipe()
	if err != nil {
		return nil, err
	}

	// Wait for the client to connect.
	ch := make(chan error)
	go func(p *win32File) {
		ch <- connectPipe(p)
	}(p)

	select {
	case err = <-ch:
		if err != nil {
			p.Close()
			p = nil
		}
	case <-l.closeCh:
		// Abort the connect request by closing the handle.
		p.Close()
		p = nil
		// Wait for the goroutine to finish so nothing references p afterwards.
		err = <-ch
		if err == nil || err == ErrFileClosed {
			err = ErrPipeListenerClosed
		}
	}
	return p, err
}
||||||
|
|
||||||
|
// listenerRoutine is the single goroutine that services Accept and Close
// requests for a listener. It exits when closeCh fires (or when an accept
// reports ErrPipeListenerClosed), then closes the first pipe handle and
// closes doneCh to wake all waiters.
func (l *win32PipeListener) listenerRoutine() {
	closed := false
	for !closed {
		select {
		case <-l.closeCh:
			closed = true
		case responseCh := <-l.acceptCh:
			var (
				p   *win32File
				err error
			)
			for {
				p, err = l.makeConnectedServerPipe()
				// If the connection was immediately closed by the client, try
				// again.
				if err != cERROR_NO_DATA {
					break
				}
			}
			responseCh <- acceptResponse{p, err}
			closed = err == ErrPipeListenerClosed
		}
	}
	syscall.Close(l.firstHandle)
	l.firstHandle = 0
	// Notify Close() and Accept() callers that the handle has been closed.
	close(l.doneCh)
}
||||||
|
|
||||||
|
// PipeConfig contains configuration for the pipe listener.
type PipeConfig struct {
	// SecurityDescriptor contains a Windows security descriptor in SDDL format.
	SecurityDescriptor string

	// MessageMode determines whether the pipe is in byte or message mode. In either
	// case the pipe is read in byte mode by default. The only practical difference in
	// this implementation is that CloseWrite() is only supported for message mode pipes;
	// CloseWrite() is implemented as a zero-byte write, but zero-byte writes are only
	// transferred to the reader (and returned as io.EOF in this implementation)
	// when the pipe is in message mode.
	MessageMode bool

	// InputBufferSize specifies the size of the input buffer, in bytes.
	InputBufferSize int32

	// OutputBufferSize specifies the size of the output buffer, in bytes.
	OutputBufferSize int32
}
||||||
|
|
||||||
|
// ListenPipe creates a listener on a Windows named pipe path, e.g. \\.\pipe\mypipe.
// The pipe must not already exist.
// A nil config is treated as the zero PipeConfig.
func ListenPipe(path string, c *PipeConfig) (net.Listener, error) {
	var (
		sd  []byte
		err error
	)
	if c == nil {
		c = &PipeConfig{}
	}
	if c.SecurityDescriptor != "" {
		sd, err = SddlToSecurityDescriptor(c.SecurityDescriptor)
		if err != nil {
			return nil, err
		}
	}
	h, err := makeServerPipeHandle(path, sd, c, true)
	if err != nil {
		return nil, err
	}
	// Create a client handle and connect it. This results in the pipe
	// instance always existing, so that clients see ERROR_PIPE_BUSY
	// rather than ERROR_FILE_NOT_FOUND. This ties the first instance
	// up so that no other instances can be used. This would have been
	// cleaner if the Win32 API matched CreateFile with ConnectNamedPipe
	// instead of CreateNamedPipe. (Apparently created named pipes are
	// considered to be in listening state regardless of whether any
	// active calls to ConnectNamedPipe are outstanding.)
	h2, err := createFile(path, 0, 0, nil, syscall.OPEN_EXISTING, cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
	if err != nil {
		syscall.Close(h)
		return nil, err
	}
	// Close the client handle. The server side of the instance will
	// still be busy, leading to ERROR_PIPE_BUSY instead of
	// ERROR_NOT_FOUND, as long as we don't close the server handle,
	// or disconnect the client with DisconnectNamedPipe.
	syscall.Close(h2)
	l := &win32PipeListener{
		firstHandle:        h,
		path:               path,
		securityDescriptor: sd,
		config:             *c,
		acceptCh:           make(chan (chan acceptResponse)),
		closeCh:            make(chan int),
		doneCh:             make(chan int),
	}
	go l.listenerRoutine()
	return l, nil
}
||||||
|
|
||||||
|
// connectPipe waits for a client to connect to the server pipe instance p.
// ERROR_PIPE_CONNECTED (client connected between creation and this call) is
// treated as success.
func connectPipe(p *win32File) error {
	c, err := p.prepareIo()
	if err != nil {
		return err
	}
	// prepareIo added us to the wait group; balance it when done.
	defer p.wg.Done()

	err = connectNamedPipe(p.handle, &c.o)
	// No deadline handler: connect waits indefinitely (until cancelled).
	_, err = p.asyncIo(c, nil, 0, err)
	if err != nil && err != cERROR_PIPE_CONNECTED {
		return err
	}
	return nil
}
||||||
|
|
||||||
|
// Accept waits for and returns the next client connection, delegating the
// actual work to listenerRoutine. It returns ErrPipeListenerClosed once the
// listener has been closed.
func (l *win32PipeListener) Accept() (net.Conn, error) {
	ch := make(chan acceptResponse)
	select {
	case l.acceptCh <- ch:
		response := <-ch
		err := response.err
		if err != nil {
			return nil, err
		}
		// Wrap to match the connection type handed out by DialPipe.
		if l.config.MessageMode {
			return &win32MessageBytePipe{
				win32Pipe: win32Pipe{win32File: response.f, path: l.path},
			}, nil
		}
		return &win32Pipe{win32File: response.f, path: l.path}, nil
	case <-l.doneCh:
		return nil, ErrPipeListenerClosed
	}
}
||||||
|
|
||||||
|
// Close shuts the listener down and waits for listenerRoutine to finish.
// It is safe to call more than once: if the routine already exited, the
// doneCh case fires immediately.
func (l *win32PipeListener) Close() error {
	select {
	case l.closeCh <- 1:
		<-l.doneCh
	case <-l.doneCh:
	}
	return nil
}

// Addr returns the listener's pipe path as a net.Addr.
func (l *win32PipeListener) Addr() net.Addr {
	return pipeAddress(l.path)
}
@ -0,0 +1,202 @@ |
|||||||
|
// +build windows
|
||||||
|
|
||||||
|
package winio |
||||||
|
|
||||||
|
import ( |
||||||
|
"bytes" |
||||||
|
"encoding/binary" |
||||||
|
"fmt" |
||||||
|
"runtime" |
||||||
|
"sync" |
||||||
|
"syscall" |
||||||
|
"unicode/utf16" |
||||||
|
|
||||||
|
"golang.org/x/sys/windows" |
||||||
|
) |
||||||
|
|
||||||
|
//sys adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges
|
||||||
|
//sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf
|
||||||
|
//sys revertToSelf() (err error) = advapi32.RevertToSelf
|
||||||
|
//sys openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken
|
||||||
|
//sys getCurrentThread() (h syscall.Handle) = GetCurrentThread
|
||||||
|
//sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW
|
||||||
|
//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW
|
||||||
|
//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW
|
||||||
|
|
||||||
|
// Privilege-related Win32 constants and well-known privilege names.
const (
	SE_PRIVILEGE_ENABLED = 2

	ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300

	SeBackupPrivilege  = "SeBackupPrivilege"
	SeRestorePrivilege = "SeRestorePrivilege"
)

// SECURITY_IMPERSONATION_LEVEL values for ImpersonateSelf.
const (
	securityAnonymous = iota
	securityIdentification
	securityImpersonation
	securityDelegation
)

var (
	// privNames caches privilege-name → LUID lookups; guarded by
	// privNameMutex.
	privNames     = make(map[string]uint64)
	privNameMutex sync.Mutex
)
||||||
|
|
||||||
|
// PrivilegeError represents an error enabling privileges.
type PrivilegeError struct {
	// privileges holds the LUIDs of the privileges that could not be enabled.
	privileges []uint64
}

// Error lists the display names of the privileges that could not be enabled.
func (e *PrivilegeError) Error() string {
	s := ""
	if len(e.privileges) > 1 {
		s = "Could not enable privileges "
	} else {
		s = "Could not enable privilege "
	}
	for i, p := range e.privileges {
		if i != 0 {
			s += ", "
		}
		s += `"`
		s += getPrivilegeName(p)
		s += `"`
	}
	return s
}
||||||
|
|
||||||
|
// RunWithPrivilege enables a single privilege for a function call.
func RunWithPrivilege(name string, fn func() error) error {
	return RunWithPrivileges([]string{name}, fn)
}
||||||
|
|
||||||
|
// RunWithPrivileges enables privileges for a function call.
// The privileges are enabled on an impersonation token scoped to the current
// OS thread (hence LockOSThread), and reverted when fn returns.
func RunWithPrivileges(names []string, fn func() error) error {
	privileges, err := mapPrivileges(names)
	if err != nil {
		return err
	}
	// Impersonation tokens are per-thread; pin fn to this thread.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	token, err := newThreadToken()
	if err != nil {
		return err
	}
	defer releaseThreadToken(token)
	err = adjustPrivileges(token, privileges, SE_PRIVILEGE_ENABLED)
	if err != nil {
		return err
	}
	return fn()
}
||||||
|
|
||||||
|
// mapPrivileges resolves privilege names to their LUIDs, consulting and
// populating the privNames cache under privNameMutex.
func mapPrivileges(names []string) ([]uint64, error) {
	var privileges []uint64
	privNameMutex.Lock()
	defer privNameMutex.Unlock()
	for _, name := range names {
		p, ok := privNames[name]
		if !ok {
			// Empty system name means look up on the local system.
			err := lookupPrivilegeValue("", name, &p)
			if err != nil {
				return nil, err
			}
			privNames[name] = p
		}
		privileges = append(privileges, p)
	}
	return privileges, nil
}
||||||
|
|
||||||
|
// EnableProcessPrivileges enables privileges globally for the process.
func EnableProcessPrivileges(names []string) error {
	return enableDisableProcessPrivilege(names, SE_PRIVILEGE_ENABLED)
}

// DisableProcessPrivileges disables privileges globally for the process.
func DisableProcessPrivileges(names []string) error {
	return enableDisableProcessPrivilege(names, 0)
}
||||||
|
|
||||||
|
// enableDisableProcessPrivilege applies action (SE_PRIVILEGE_ENABLED or 0)
// to the named privileges on the current process token.
func enableDisableProcessPrivilege(names []string, action uint32) error {
	privileges, err := mapPrivileges(names)
	if err != nil {
		return err
	}

	p, _ := windows.GetCurrentProcess()
	var token windows.Token
	err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token)
	if err != nil {
		return err
	}

	defer token.Close()
	return adjustPrivileges(token, privileges, action)
}
||||||
|
|
||||||
|
// adjustPrivileges serializes a TOKEN_PRIVILEGES structure (count followed by
// LUID/attribute pairs) and calls AdjustTokenPrivileges on the given token.
// It returns *PrivilegeError when the call succeeds but not every privilege
// was assigned (ERROR_NOT_ALL_ASSIGNED).
func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error {
	var b bytes.Buffer
	binary.Write(&b, binary.LittleEndian, uint32(len(privileges)))
	for _, p := range privileges {
		binary.Write(&b, binary.LittleEndian, p)
		binary.Write(&b, binary.LittleEndian, action)
	}
	prevState := make([]byte, b.Len())
	reqSize := uint32(0)
	success, err := adjustTokenPrivileges(token, false, &b.Bytes()[0], uint32(len(prevState)), &prevState[0], &reqSize)
	if !success {
		return err
	}
	// AdjustTokenPrivileges can "succeed" while still reporting
	// ERROR_NOT_ALL_ASSIGNED via last-error.
	if err == ERROR_NOT_ALL_ASSIGNED {
		return &PrivilegeError{privileges}
	}
	return nil
}
||||||
|
|
||||||
|
// getPrivilegeName returns a human-readable display name for a privilege
// LUID, falling back to the internal name or the raw LUID value when the
// lookups fail.
func getPrivilegeName(luid uint64) string {
	var nameBuffer [256]uint16
	bufSize := uint32(len(nameBuffer))
	err := lookupPrivilegeName("", &luid, &nameBuffer[0], &bufSize)
	if err != nil {
		return fmt.Sprintf("<unknown privilege %d>", luid)
	}

	var displayNameBuffer [256]uint16
	displayBufSize := uint32(len(displayNameBuffer))
	var langID uint32
	err = lookupPrivilegeDisplayName("", &nameBuffer[0], &displayNameBuffer[0], &displayBufSize, &langID)
	if err != nil {
		return fmt.Sprintf("<unknown privilege %s>", string(utf16.Decode(nameBuffer[:bufSize])))
	}

	return string(utf16.Decode(displayNameBuffer[:displayBufSize]))
}
||||||
|
|
||||||
|
// newThreadToken impersonates the current thread's own security context and
// opens the resulting thread token with the access rights needed to adjust
// and query privileges. On success the thread is left impersonating; the
// caller must release the token via releaseThreadToken.
func newThreadToken() (windows.Token, error) {
	err := impersonateSelf(securityImpersonation)
	if err != nil {
		return 0, err
	}

	var token windows.Token
	err = openThreadToken(getCurrentThread(), syscall.TOKEN_ADJUST_PRIVILEGES|syscall.TOKEN_QUERY, false, &token)
	if err != nil {
		rerr := revertToSelf()
		if rerr != nil {
			// Failing to undo impersonation leaves the thread in the wrong
			// security context; there is no safe way to continue.
			panic(rerr)
		}
		return 0, err
	}
	return token, nil
}
||||||
|
|
||||||
|
func releaseThreadToken(h windows.Token) { |
||||||
|
err := revertToSelf() |
||||||
|
if err != nil { |
||||||
|
panic(err) |
||||||
|
} |
||||||
|
h.Close() |
||||||
|
} |
@ -0,0 +1,128 @@ |
|||||||
|
package winio |
||||||
|
|
||||||
|
import ( |
||||||
|
"bytes" |
||||||
|
"encoding/binary" |
||||||
|
"fmt" |
||||||
|
"strings" |
||||||
|
"unicode/utf16" |
||||||
|
"unsafe" |
||||||
|
) |
||||||
|
|
||||||
|
const ( |
||||||
|
reparseTagMountPoint = 0xA0000003 |
||||||
|
reparseTagSymlink = 0xA000000C |
||||||
|
) |
||||||
|
|
||||||
|
// reparseDataBuffer mirrors the fixed-size header of the Win32
// REPARSE_DATA_BUFFER structure for the mount-point and symlink reparse
// tags. The variable-length substitute-name and print-name data (and, for
// symlinks, a flags field) follow this header in the raw buffer.
type reparseDataBuffer struct {
	ReparseTag        uint32
	ReparseDataLength uint16
	Reserved          uint16
	// Offsets and lengths are in bytes, relative to the start of the
	// path-buffer region that follows this header.
	SubstituteNameOffset uint16
	SubstituteNameLength uint16
	PrintNameOffset      uint16
	PrintNameLength      uint16
}
||||||
|
|
||||||
|
// ReparsePoint describes a Win32 symlink or mount point.
type ReparsePoint struct {
	// Target is the path the reparse point refers to.
	Target string
	// IsMountPoint is true for a mount point (junction), false for a
	// symbolic link.
	IsMountPoint bool
}
||||||
|
|
||||||
|
// UnsupportedReparsePointError is returned when trying to decode a reparse
// point that is neither a symlink nor a mount point.
type UnsupportedReparsePointError struct {
	// Tag is the unrecognized reparse tag from the buffer header.
	Tag uint32
}

// Error implements the error interface, reporting the unrecognized tag in
// hexadecimal.
func (e *UnsupportedReparsePointError) Error() string {
	return fmt.Sprintf("unsupported reparse point %x", e.Tag)
}
||||||
|
|
||||||
|
// DecodeReparsePoint decodes a Win32 REPARSE_DATA_BUFFER structure containing either a symlink
|
||||||
|
// or a mount point.
|
||||||
|
func DecodeReparsePoint(b []byte) (*ReparsePoint, error) { |
||||||
|
tag := binary.LittleEndian.Uint32(b[0:4]) |
||||||
|
return DecodeReparsePointData(tag, b[8:]) |
||||||
|
} |
||||||
|
|
||||||
|
func DecodeReparsePointData(tag uint32, b []byte) (*ReparsePoint, error) { |
||||||
|
isMountPoint := false |
||||||
|
switch tag { |
||||||
|
case reparseTagMountPoint: |
||||||
|
isMountPoint = true |
||||||
|
case reparseTagSymlink: |
||||||
|
default: |
||||||
|
return nil, &UnsupportedReparsePointError{tag} |
||||||
|
} |
||||||
|
nameOffset := 8 + binary.LittleEndian.Uint16(b[4:6]) |
||||||
|
if !isMountPoint { |
||||||
|
nameOffset += 4 |
||||||
|
} |
||||||
|
nameLength := binary.LittleEndian.Uint16(b[6:8]) |
||||||
|
name := make([]uint16, nameLength/2) |
||||||
|
err := binary.Read(bytes.NewReader(b[nameOffset:nameOffset+nameLength]), binary.LittleEndian, &name) |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
return &ReparsePoint{string(utf16.Decode(name)), isMountPoint}, nil |
||||||
|
} |
||||||
|
|
||||||
|
// isDriveLetter reports whether c is an ASCII letter, i.e. a valid Windows
// drive letter.
func isDriveLetter(c byte) bool {
	lower := c | 0x20 // fold ASCII upper case to lower case
	return lower >= 'a' && lower <= 'z'
}
||||||
|
|
||||||
|
// EncodeReparsePoint encodes a Win32 REPARSE_DATA_BUFFER structure describing
// a symlink or mount point from rp.
func EncodeReparsePoint(rp *ReparsePoint) []byte {
	// Generate an NT path and determine if this is a relative path.
	var ntTarget string
	relative := false
	if strings.HasPrefix(rp.Target, `\\?\`) {
		ntTarget = `\??\` + rp.Target[4:]
	} else if strings.HasPrefix(rp.Target, `\\`) {
		ntTarget = `\??\UNC\` + rp.Target[2:]
	} else if len(rp.Target) >= 2 && isDriveLetter(rp.Target[0]) && rp.Target[1] == ':' {
		ntTarget = `\??\` + rp.Target
	} else {
		ntTarget = rp.Target
		relative = true
	}

	// The paths must be NUL-terminated even though they are counted strings.
	target16 := utf16.Encode([]rune(rp.Target + "\x00"))
	ntTarget16 := utf16.Encode([]rune(ntTarget + "\x00"))

	// ReparseDataLength excludes the first 8 header bytes (tag + length +
	// reserved) but includes both UTF-16 path buffers.
	size := int(unsafe.Sizeof(reparseDataBuffer{})) - 8
	size += len(ntTarget16)*2 + len(target16)*2

	tag := uint32(reparseTagMountPoint)
	if !rp.IsMountPoint {
		tag = reparseTagSymlink
		size += 4 // Add room for symlink flags
	}

	data := reparseDataBuffer{
		ReparseTag:        tag,
		ReparseDataLength: uint16(size),
		// The substitute name comes first in the path buffer, followed by
		// the print name; lengths exclude the terminating NUL code unit.
		SubstituteNameOffset: 0,
		SubstituteNameLength: uint16((len(ntTarget16) - 1) * 2),
		PrintNameOffset:      uint16(len(ntTarget16) * 2),
		PrintNameLength:      uint16((len(target16) - 1) * 2),
	}

	var b bytes.Buffer
	// Writes into a bytes.Buffer cannot fail, so binary.Write errors are
	// deliberately ignored throughout.
	binary.Write(&b, binary.LittleEndian, &data)
	if !rp.IsMountPoint {
		flags := uint32(0)
		if relative {
			flags |= 1 // bit 0: relative symlink (SYMLINK_FLAG_RELATIVE)
		}
		binary.Write(&b, binary.LittleEndian, flags)
	}

	binary.Write(&b, binary.LittleEndian, ntTarget16)
	binary.Write(&b, binary.LittleEndian, target16)
	return b.Bytes()
}
@ -0,0 +1,98 @@ |
|||||||
|
// +build windows
|
||||||
|
|
||||||
|
package winio |
||||||
|
|
||||||
|
import ( |
||||||
|
"syscall" |
||||||
|
"unsafe" |
||||||
|
) |
||||||
|
|
||||||
|
//sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW
|
||||||
|
//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW
|
||||||
|
//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW
|
||||||
|
//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW
|
||||||
|
//sys localFree(mem uintptr) = LocalFree
|
||||||
|
//sys getSecurityDescriptorLength(sd uintptr) (len uint32) = advapi32.GetSecurityDescriptorLength
|
||||||
|
|
||||||
|
const (
	// cERROR_NONE_MAPPED is the Win32 ERROR_NONE_MAPPED errno: no mapping
	// between the account name and a SID was done.
	cERROR_NONE_MAPPED = syscall.Errno(1332)
)

// AccountLookupError records a failure to translate an account name into a
// SID.
type AccountLookupError struct {
	// Name is the account name that failed to resolve.
	Name string
	// Err is the underlying Windows error.
	Err error
}

// Error implements the error interface.
func (e *AccountLookupError) Error() string {
	if e.Name == "" {
		return "lookup account: empty account name specified"
	}
	reason := "not found"
	if e.Err != cERROR_NONE_MAPPED {
		reason = e.Err.Error()
	}
	return "lookup account " + e.Name + ": " + reason
}
||||||
|
|
||||||
|
// SddlConversionError records a failure converting between an SDDL string
// and a binary security descriptor.
type SddlConversionError struct {
	// Sddl is the SDDL string involved in the failed conversion.
	Sddl string
	// Err is the underlying error.
	Err error
}

// Error implements the error interface.
func (e *SddlConversionError) Error() string {
	msg := "convert " + e.Sddl
	return msg + ": " + e.Err.Error()
}
||||||
|
|
||||||
|
// LookupSidByName looks up the SID of an account by name, returning it in
// its string form ("S-1-..." as produced by ConvertSidToStringSid).
func LookupSidByName(name string) (sid string, err error) {
	if name == "" {
		// Report the canonical "none mapped" error for an empty name
		// instead of calling into the API.
		return "", &AccountLookupError{name, cERROR_NONE_MAPPED}
	}

	// First call with nil buffers to learn the required sizes; the API
	// reports ERROR_INSUFFICIENT_BUFFER in that (expected) case.
	var sidSize, sidNameUse, refDomainSize uint32
	err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse)
	if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER {
		return "", &AccountLookupError{name, err}
	}
	// Second call with properly sized buffers performs the real lookup.
	sidBuffer := make([]byte, sidSize)
	refDomainBuffer := make([]uint16, refDomainSize)
	err = lookupAccountName(nil, name, &sidBuffer[0], &sidSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse)
	if err != nil {
		return "", &AccountLookupError{name, err}
	}
	// ConvertSidToStringSid allocates the string on the Windows heap; copy
	// it into a Go string, then free the Windows-owned buffer.
	var strBuffer *uint16
	err = convertSidToStringSid(&sidBuffer[0], &strBuffer)
	if err != nil {
		return "", &AccountLookupError{name, err}
	}
	sid = syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:])
	localFree(uintptr(unsafe.Pointer(strBuffer)))
	return sid, nil
}
||||||
|
|
||||||
|
// SddlToSecurityDescriptor converts an SDDL string to a binary,
// self-relative security descriptor.
func SddlToSecurityDescriptor(sddl string) ([]byte, error) {
	var sdBuffer uintptr
	// Revision 1 corresponds to SDDL_REVISION_1, the only defined revision.
	err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil)
	if err != nil {
		return nil, &SddlConversionError{sddl, err}
	}
	// The API allocates the descriptor with LocalAlloc; copy it into
	// Go-managed memory and free the Windows-owned buffer on return.
	defer localFree(sdBuffer)
	sd := make([]byte, getSecurityDescriptorLength(sdBuffer))
	copy(sd, (*[0xffff]byte)(unsafe.Pointer(sdBuffer))[:len(sd)])
	return sd, nil
}
||||||
|
|
||||||
|
// SecurityDescriptorToSddl converts a binary, self-relative security
// descriptor to its SDDL string form.
func SecurityDescriptorToSddl(sd []byte) (string, error) {
	var sddl *uint16
	// The returned string length seems to include an arbitrary number of
	// terminating NULs. Don't use it; rely on UTF16ToString instead.
	// NOTE(review): 0xff is passed as the SECURITY_INFORMATION mask —
	// presumably to request all descriptor sections; confirm against the
	// winnt.h SECURITY_INFORMATION constants.
	err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil)
	if err != nil {
		return "", err
	}
	// The SDDL string is allocated on the Windows heap; free it after
	// copying into a Go string.
	defer localFree(uintptr(unsafe.Pointer(sddl)))
	return syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(sddl))[:]), nil
}
@ -0,0 +1,3 @@ |
|||||||
|
package winio |
||||||
|
|
||||||
|
//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go
|
@ -0,0 +1,520 @@ |
|||||||
|
// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
|
||||||
|
|
||||||
|
package winio |
||||||
|
|
||||||
|
import ( |
||||||
|
"syscall" |
||||||
|
"unsafe" |
||||||
|
|
||||||
|
"golang.org/x/sys/windows" |
||||||
|
) |
||||||
|
|
||||||
|
var _ unsafe.Pointer |
||||||
|
|
||||||
|
// Do the interface allocations only once for common
|
||||||
|
// Errno values.
|
||||||
|
const ( |
||||||
|
errnoERROR_IO_PENDING = 997 |
||||||
|
) |
||||||
|
|
||||||
|
var ( |
||||||
|
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) |
||||||
|
) |
||||||
|
|
||||||
|
// errnoErr returns common boxed Errno values, to prevent
|
||||||
|
// allocations at runtime.
|
||||||
|
func errnoErr(e syscall.Errno) error { |
||||||
|
switch e { |
||||||
|
case 0: |
||||||
|
return nil |
||||||
|
case errnoERROR_IO_PENDING: |
||||||
|
return errERROR_IO_PENDING |
||||||
|
} |
||||||
|
// TODO: add more here, after collecting data on the common
|
||||||
|
// error values see on Windows. (perhaps when running
|
||||||
|
// all.bat?)
|
||||||
|
return e |
||||||
|
} |
||||||
|
|
||||||
|
var ( |
||||||
|
modkernel32 = windows.NewLazySystemDLL("kernel32.dll") |
||||||
|
modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") |
||||||
|
|
||||||
|
procCancelIoEx = modkernel32.NewProc("CancelIoEx") |
||||||
|
procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") |
||||||
|
procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") |
||||||
|
procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") |
||||||
|
procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") |
||||||
|
procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") |
||||||
|
procCreateFileW = modkernel32.NewProc("CreateFileW") |
||||||
|
procWaitNamedPipeW = modkernel32.NewProc("WaitNamedPipeW") |
||||||
|
procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") |
||||||
|
procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") |
||||||
|
procLocalAlloc = modkernel32.NewProc("LocalAlloc") |
||||||
|
procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") |
||||||
|
procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") |
||||||
|
procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") |
||||||
|
procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") |
||||||
|
procLocalFree = modkernel32.NewProc("LocalFree") |
||||||
|
procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength") |
||||||
|
procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") |
||||||
|
procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle") |
||||||
|
procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") |
||||||
|
procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") |
||||||
|
procRevertToSelf = modadvapi32.NewProc("RevertToSelf") |
||||||
|
procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") |
||||||
|
procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") |
||||||
|
procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") |
||||||
|
procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") |
||||||
|
procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") |
||||||
|
procBackupRead = modkernel32.NewProc("BackupRead") |
||||||
|
procBackupWrite = modkernel32.NewProc("BackupWrite") |
||||||
|
) |
||||||
|
|
||||||
|
func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) { |
||||||
|
r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0) |
||||||
|
if r1 == 0 { |
||||||
|
if e1 != 0 { |
||||||
|
err = errnoErr(e1) |
||||||
|
} else { |
||||||
|
err = syscall.EINVAL |
||||||
|
} |
||||||
|
} |
||||||
|
return |
||||||
|
} |
||||||
|
|
||||||
|
func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) { |
||||||
|
r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0) |
||||||
|
newport = syscall.Handle(r0) |
||||||
|
if newport == 0 { |
||||||
|
if e1 != 0 { |
||||||
|
err = errnoErr(e1) |
||||||
|
} else { |
||||||
|
err = syscall.EINVAL |
||||||
|
} |
||||||
|
} |
||||||
|
return |
||||||
|
} |
||||||
|
|
||||||
|
func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) { |
||||||
|
r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0) |
||||||
|
if r1 == 0 { |
||||||
|
if e1 != 0 { |
||||||
|
err = errnoErr(e1) |
||||||
|
} else { |
||||||
|
err = syscall.EINVAL |
||||||
|
} |
||||||
|
} |
||||||
|
return |
||||||
|
} |
||||||
|
|
||||||
|
func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) { |
||||||
|
r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0) |
||||||
|
if r1 == 0 { |
||||||
|
if e1 != 0 { |
||||||
|
err = errnoErr(e1) |
||||||
|
} else { |
||||||
|
err = syscall.EINVAL |
||||||
|
} |
||||||
|
} |
||||||
|
return |
||||||
|
} |
||||||
|
|
||||||
|
func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) { |
||||||
|
r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0) |
||||||
|
if r1 == 0 { |
||||||
|
if e1 != 0 { |
||||||
|
err = errnoErr(e1) |
||||||
|
} else { |
||||||
|
err = syscall.EINVAL |
||||||
|
} |
||||||
|
} |
||||||
|
return |
||||||
|
} |
||||||
|
|
||||||
|
func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { |
||||||
|
var _p0 *uint16 |
||||||
|
_p0, err = syscall.UTF16PtrFromString(name) |
||||||
|
if err != nil { |
||||||
|
return |
||||||
|
} |
||||||
|
return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa) |
||||||
|
} |
||||||
|
|
||||||
|
func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { |
||||||
|
r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) |
||||||
|
handle = syscall.Handle(r0) |
||||||
|
if handle == syscall.InvalidHandle { |
||||||
|
if e1 != 0 { |
||||||
|
err = errnoErr(e1) |
||||||
|
} else { |
||||||
|
err = syscall.EINVAL |
||||||
|
} |
||||||
|
} |
||||||
|
return |
||||||
|
} |
||||||
|
|
||||||
|
func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { |
||||||
|
var _p0 *uint16 |
||||||
|
_p0, err = syscall.UTF16PtrFromString(name) |
||||||
|
if err != nil { |
||||||
|
return |
||||||
|
} |
||||||
|
return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile) |
||||||
|
} |
||||||
|
|
||||||
|
func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { |
||||||
|
r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) |
||||||
|
handle = syscall.Handle(r0) |
||||||
|
if handle == syscall.InvalidHandle { |
||||||
|
if e1 != 0 { |
||||||
|
err = errnoErr(e1) |
||||||
|
} else { |
||||||
|
err = syscall.EINVAL |
||||||
|
} |
||||||
|
} |
||||||
|
return |
||||||
|
} |
||||||
|
|
||||||
|
func waitNamedPipe(name string, timeout uint32) (err error) { |
||||||
|
var _p0 *uint16 |
||||||
|
_p0, err = syscall.UTF16PtrFromString(name) |
||||||
|
if err != nil { |
||||||
|
return |
||||||
|
} |
||||||
|
return _waitNamedPipe(_p0, timeout) |
||||||
|
} |
||||||
|
|
||||||
|
func _waitNamedPipe(name *uint16, timeout uint32) (err error) { |
||||||
|
r1, _, e1 := syscall.Syscall(procWaitNamedPipeW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(timeout), 0) |
||||||
|
if r1 == 0 { |
||||||
|
if e1 != 0 { |
||||||
|
err = errnoErr(e1) |
||||||
|
} else { |
||||||
|
err = syscall.EINVAL |
||||||
|
} |
||||||
|
} |
||||||
|
return |
||||||
|
} |
||||||
|
|
||||||
|
func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { |
||||||
|
r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) |
||||||
|
if r1 == 0 { |
||||||
|
if e1 != 0 { |
||||||
|
err = errnoErr(e1) |
||||||
|
} else { |
||||||
|
err = syscall.EINVAL |
||||||
|
} |
||||||
|
} |
||||||
|
return |
||||||
|
} |
||||||
|
|
||||||
|
func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { |
||||||
|
r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) |
||||||
|
if r1 == 0 { |
||||||
|
if e1 != 0 { |
||||||
|
err = errnoErr(e1) |
||||||
|
} else { |
||||||
|
err = syscall.EINVAL |
||||||
|
} |
||||||
|
} |
||||||
|
return |
||||||
|
} |
||||||
|
|
||||||
|
func localAlloc(uFlags uint32, length uint32) (ptr uintptr) { |
||||||
|
r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0) |
||||||
|
ptr = uintptr(r0) |
||||||
|
return |
||||||
|
} |
||||||
|
|
||||||
|
func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { |
||||||
|
var _p0 *uint16 |
||||||
|
_p0, err = syscall.UTF16PtrFromString(accountName) |
||||||
|
if err != nil { |
||||||
|
return |
||||||
|
} |
||||||
|
return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse) |
||||||
|
} |
||||||
|
|
||||||
|
func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { |
||||||
|
r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0) |
||||||
|
if r1 == 0 { |
||||||
|
if e1 != 0 { |
||||||
|
err = errnoErr(e1) |
||||||
|
} else { |
||||||
|
err = syscall.EINVAL |
||||||
|
} |
||||||
|
} |
||||||
|
return |
||||||
|
} |
||||||
|
|
||||||
|
func convertSidToStringSid(sid *byte, str **uint16) (err error) { |
||||||
|
r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0) |
||||||
|
if r1 == 0 { |
||||||
|
if e1 != 0 { |
||||||
|
err = errnoErr(e1) |
||||||
|
} else { |
||||||
|
err = syscall.EINVAL |
||||||
|
} |
||||||
|
} |
||||||
|
return |
||||||
|
} |
||||||
|
|
||||||
|
func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) { |
||||||
|
var _p0 *uint16 |
||||||
|
_p0, err = syscall.UTF16PtrFromString(str) |
||||||
|
if err != nil { |
||||||
|
return |
||||||
|
} |
||||||
|
return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) |
||||||
|
} |
||||||
|
|
||||||
|
func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) { |
||||||
|
r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) |
||||||
|
if r1 == 0 { |
||||||
|
if e1 != 0 { |
||||||
|
err = errnoErr(e1) |
||||||
|
} else { |
||||||
|
err = syscall.EINVAL |
||||||
|
} |
||||||
|
} |
||||||
|
return |
||||||
|
} |
||||||
|
|
||||||
|
func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) { |
||||||
|
r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0) |
||||||
|
if r1 == 0 { |
||||||
|
if e1 != 0 { |
||||||
|
err = errnoErr(e1) |
||||||
|
} else { |
||||||
|
err = syscall.EINVAL |
||||||
|
} |
||||||
|
} |
||||||
|
return |
||||||
|
} |
||||||
|
|
||||||
|
func localFree(mem uintptr) { |
||||||
|
syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0) |
||||||
|
return |
||||||
|
} |
||||||
|
|
||||||
|
func getSecurityDescriptorLength(sd uintptr) (len uint32) { |
||||||
|
r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0) |
||||||
|
len = uint32(r0) |
||||||
|
return |
||||||
|
} |
||||||
|
|
||||||
|
func getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) { |
||||||
|
r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0) |
||||||
|
if r1 == 0 { |
||||||
|
if e1 != 0 { |
||||||
|
err = errnoErr(e1) |
||||||
|
} else { |
||||||
|
err = syscall.EINVAL |
||||||
|
} |
||||||
|
} |
||||||
|
return |
||||||
|
} |
||||||
|
|
||||||
|
func setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) { |
||||||
|
r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0) |
||||||
|
if r1 == 0 { |
||||||
|
if e1 != 0 { |
||||||
|
err = errnoErr(e1) |
||||||
|
} else { |
||||||
|
err = syscall.EINVAL |
||||||
|
} |
||||||
|
} |
||||||
|
return |
||||||
|
} |
||||||
|
|
||||||
|
// adjustTokenPrivileges wraps the AdjustTokenPrivileges Win32 API
// (machine-generated stub; regenerate via `go generate` rather than editing).
//
// Note the generated `if true` below: unlike the other stubs, err is
// populated unconditionally because AdjustTokenPrivileges can set a
// meaningful last-error value (e.g. ERROR_NOT_ALL_ASSIGNED) even when the
// call succeeds. Callers must check the success flag first, then err.
func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) {
	var _p0 uint32
	if releaseAll {
		_p0 = 1
	} else {
		_p0 = 0
	}
	r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize)))
	success = r0 != 0
	if true {
		if e1 != 0 {
			err = errnoErr(e1)
		} else {
			err = syscall.EINVAL
		}
	}
	return
}
||||||
|
|
||||||
|
func impersonateSelf(level uint32) (err error) { |
||||||
|
r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0) |
||||||
|
if r1 == 0 { |
||||||
|
if e1 != 0 { |
||||||
|
err = errnoErr(e1) |
||||||
|
} else { |
||||||
|
err = syscall.EINVAL |
||||||
|
} |
||||||
|
} |
||||||
|
return |
||||||
|
} |
||||||
|
|
||||||
|
func revertToSelf() (err error) { |
||||||
|
r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) |
||||||
|
if r1 == 0 { |
||||||
|
if e1 != 0 { |
||||||
|
err = errnoErr(e1) |
||||||
|
} else { |
||||||
|
err = syscall.EINVAL |
||||||
|
} |
||||||
|
} |
||||||
|
return |
||||||
|
} |
||||||
|
|
||||||
|
func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) { |
||||||
|
var _p0 uint32 |
||||||
|
if openAsSelf { |
||||||
|
_p0 = 1 |
||||||
|
} else { |
||||||
|
_p0 = 0 |
||||||
|
} |
||||||
|
r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) |
||||||
|
if r1 == 0 { |
||||||
|
if e1 != 0 { |
||||||
|
err = errnoErr(e1) |
||||||
|
} else { |
||||||
|
err = syscall.EINVAL |
||||||
|
} |
||||||
|
} |
||||||
|
return |
||||||
|
} |
||||||
|
|
||||||
|
func getCurrentThread() (h syscall.Handle) { |
||||||
|
r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0) |
||||||
|
h = syscall.Handle(r0) |
||||||
|
return |
||||||
|
} |
||||||
|
|
||||||
|
func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) { |
||||||
|
var _p0 *uint16 |
||||||
|
_p0, err = syscall.UTF16PtrFromString(systemName) |
||||||
|
if err != nil { |
||||||
|
return |
||||||
|
} |
||||||
|
var _p1 *uint16 |
||||||
|
_p1, err = syscall.UTF16PtrFromString(name) |
||||||
|
if err != nil { |
||||||
|
return |
||||||
|
} |
||||||
|
return _lookupPrivilegeValue(_p0, _p1, luid) |
||||||
|
} |
||||||
|
|
||||||
|
func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) { |
||||||
|
r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) |
||||||
|
if r1 == 0 { |
||||||
|
if e1 != 0 { |
||||||
|
err = errnoErr(e1) |
||||||
|
} else { |
||||||
|
err = syscall.EINVAL |
||||||
|
} |
||||||
|
} |
||||||
|
return |
||||||
|
} |
||||||
|
|
||||||
|
func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) { |
||||||
|
var _p0 *uint16 |
||||||
|
_p0, err = syscall.UTF16PtrFromString(systemName) |
||||||
|
if err != nil { |
||||||
|
return |
||||||
|
} |
||||||
|
return _lookupPrivilegeName(_p0, luid, buffer, size) |
||||||
|
} |
||||||
|
|
||||||
|
func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) { |
||||||
|
r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0) |
||||||
|
if r1 == 0 { |
||||||
|
if e1 != 0 { |
||||||
|
err = errnoErr(e1) |
||||||
|
} else { |
||||||
|
err = syscall.EINVAL |
||||||
|
} |
||||||
|
} |
||||||
|
return |
||||||
|
} |
||||||
|
|
||||||
|
func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { |
||||||
|
var _p0 *uint16 |
||||||
|
_p0, err = syscall.UTF16PtrFromString(systemName) |
||||||
|
if err != nil { |
||||||
|
return |
||||||
|
} |
||||||
|
return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId) |
||||||
|
} |
||||||
|
|
||||||
|
func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { |
||||||
|
r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0) |
||||||
|
if r1 == 0 { |
||||||
|
if e1 != 0 { |
||||||
|
err = errnoErr(e1) |
||||||
|
} else { |
||||||
|
err = syscall.EINVAL |
||||||
|
} |
||||||
|
} |
||||||
|
return |
||||||
|
} |
||||||
|
|
||||||
|
func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { |
||||||
|
var _p0 *byte |
||||||
|
if len(b) > 0 { |
||||||
|
_p0 = &b[0] |
||||||
|
} |
||||||
|
var _p1 uint32 |
||||||
|
if abort { |
||||||
|
_p1 = 1 |
||||||
|
} else { |
||||||
|
_p1 = 0 |
||||||
|
} |
||||||
|
var _p2 uint32 |
||||||
|
if processSecurity { |
||||||
|
_p2 = 1 |
||||||
|
} else { |
||||||
|
_p2 = 0 |
||||||
|
} |
||||||
|
r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) |
||||||
|
if r1 == 0 { |
||||||
|
if e1 != 0 { |
||||||
|
err = errnoErr(e1) |
||||||
|
} else { |
||||||
|
err = syscall.EINVAL |
||||||
|
} |
||||||
|
} |
||||||
|
return |
||||||
|
} |
||||||
|
|
||||||
|
func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { |
||||||
|
var _p0 *byte |
||||||
|
if len(b) > 0 { |
||||||
|
_p0 = &b[0] |
||||||
|
} |
||||||
|
var _p1 uint32 |
||||||
|
if abort { |
||||||
|
_p1 = 1 |
||||||
|
} else { |
||||||
|
_p1 = 0 |
||||||
|
} |
||||||
|
var _p2 uint32 |
||||||
|
if processSecurity { |
||||||
|
_p2 = 1 |
||||||
|
} else { |
||||||
|
_p2 = 0 |
||||||
|
} |
||||||
|
r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) |
||||||
|
if r1 == 0 { |
||||||
|
if e1 != 0 { |
||||||
|
err = errnoErr(e1) |
||||||
|
} else { |
||||||
|
err = syscall.EINVAL |
||||||
|
} |
||||||
|
} |
||||||
|
return |
||||||
|
} |
@ -0,0 +1,202 @@ |
|||||||
|
Apache License |
||||||
|
Version 2.0, January 2004 |
||||||
|
http://www.apache.org/licenses/ |
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION |
||||||
|
|
||||||
|
1. Definitions. |
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction, |
||||||
|
and distribution as defined by Sections 1 through 9 of this document. |
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by |
||||||
|
the copyright owner that is granting the License. |
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all |
||||||
|
other entities that control, are controlled by, or are under common |
||||||
|
control with that entity. For the purposes of this definition, |
||||||
|
"control" means (i) the power, direct or indirect, to cause the |
||||||
|
direction or management of such entity, whether by contract or |
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the |
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity. |
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity |
||||||
|
exercising permissions granted by this License. |
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications, |
||||||
|
including but not limited to software source code, documentation |
||||||
|
source, and configuration files. |
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical |
||||||
|
transformation or translation of a Source form, including but |
||||||
|
not limited to compiled object code, generated documentation, |
||||||
|
and conversions to other media types. |
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or |
||||||
|
Object form, made available under the License, as indicated by a |
||||||
|
copyright notice that is included in or attached to the work |
||||||
|
(an example is provided in the Appendix below). |
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object |
||||||
|
form, that is based on (or derived from) the Work and for which the |
||||||
|
editorial revisions, annotations, elaborations, or other modifications |
||||||
|
represent, as a whole, an original work of authorship. For the purposes |
||||||
|
of this License, Derivative Works shall not include works that remain |
||||||
|
separable from, or merely link (or bind by name) to the interfaces of, |
||||||
|
the Work and Derivative Works thereof. |
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including |
||||||
|
the original version of the Work and any modifications or additions |
||||||
|
to that Work or Derivative Works thereof, that is intentionally |
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner |
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of |
||||||
|
the copyright owner. For the purposes of this definition, "submitted" |
||||||
|
means any form of electronic, verbal, or written communication sent |
||||||
|
to the Licensor or its representatives, including but not limited to |
||||||
|
communication on electronic mailing lists, source code control systems, |
||||||
|
and issue tracking systems that are managed by, or on behalf of, the |
||||||
|
Licensor for the purpose of discussing and improving the Work, but |
||||||
|
excluding communication that is conspicuously marked or otherwise |
||||||
|
designated in writing by the copyright owner as "Not a Contribution." |
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity |
||||||
|
on behalf of whom a Contribution has been received by Licensor and |
||||||
|
subsequently incorporated within the Work. |
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of |
||||||
|
this License, each Contributor hereby grants to You a perpetual, |
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
||||||
|
copyright license to reproduce, prepare Derivative Works of, |
||||||
|
publicly display, publicly perform, sublicense, and distribute the |
||||||
|
Work and such Derivative Works in Source or Object form. |
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of |
||||||
|
this License, each Contributor hereby grants to You a perpetual, |
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
||||||
|
(except as stated in this section) patent license to make, have made, |
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work, |
||||||
|
where such license applies only to those patent claims licensable |
||||||
|
by such Contributor that are necessarily infringed by their |
||||||
|
Contribution(s) alone or by combination of their Contribution(s) |
||||||
|
with the Work to which such Contribution(s) was submitted. If You |
||||||
|
institute patent litigation against any entity (including a |
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work |
||||||
|
or a Contribution incorporated within the Work constitutes direct |
||||||
|
or contributory patent infringement, then any patent licenses |
||||||
|
granted to You under this License for that Work shall terminate |
||||||
|
as of the date such litigation is filed. |
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the |
||||||
|
Work or Derivative Works thereof in any medium, with or without |
||||||
|
modifications, and in Source or Object form, provided that You |
||||||
|
meet the following conditions: |
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or |
||||||
|
Derivative Works a copy of this License; and |
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices |
||||||
|
stating that You changed the files; and |
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works |
||||||
|
that You distribute, all copyright, patent, trademark, and |
||||||
|
attribution notices from the Source form of the Work, |
||||||
|
excluding those notices that do not pertain to any part of |
||||||
|
the Derivative Works; and |
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its |
||||||
|
distribution, then any Derivative Works that You distribute must |
||||||
|
include a readable copy of the attribution notices contained |
||||||
|
within such NOTICE file, excluding those notices that do not |
||||||
|
pertain to any part of the Derivative Works, in at least one |
||||||
|
of the following places: within a NOTICE text file distributed |
||||||
|
as part of the Derivative Works; within the Source form or |
||||||
|
documentation, if provided along with the Derivative Works; or, |
||||||
|
within a display generated by the Derivative Works, if and |
||||||
|
wherever such third-party notices normally appear. The contents |
||||||
|
of the NOTICE file are for informational purposes only and |
||||||
|
do not modify the License. You may add Your own attribution |
||||||
|
notices within Derivative Works that You distribute, alongside |
||||||
|
or as an addendum to the NOTICE text from the Work, provided |
||||||
|
that such additional attribution notices cannot be construed |
||||||
|
as modifying the License. |
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and |
||||||
|
may provide additional or different license terms and conditions |
||||||
|
for use, reproduction, or distribution of Your modifications, or |
||||||
|
for any such Derivative Works as a whole, provided Your use, |
||||||
|
reproduction, and distribution of the Work otherwise complies with |
||||||
|
the conditions stated in this License. |
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise, |
||||||
|
any Contribution intentionally submitted for inclusion in the Work |
||||||
|
by You to the Licensor shall be under the terms and conditions of |
||||||
|
this License, without any additional terms or conditions. |
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify |
||||||
|
the terms of any separate license agreement you may have executed |
||||||
|
with Licensor regarding such Contributions. |
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade |
||||||
|
names, trademarks, service marks, or product names of the Licensor, |
||||||
|
except as required for reasonable and customary use in describing the |
||||||
|
origin of the Work and reproducing the content of the NOTICE file. |
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or |
||||||
|
agreed to in writing, Licensor provides the Work (and each |
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS, |
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or |
||||||
|
implied, including, without limitation, any warranties or conditions |
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A |
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the |
||||||
|
appropriateness of using or redistributing the Work and assume any |
||||||
|
risks associated with Your exercise of permissions under this License. |
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory, |
||||||
|
whether in tort (including negligence), contract, or otherwise, |
||||||
|
unless required by applicable law (such as deliberate and grossly |
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be |
||||||
|
liable to You for damages, including any direct, indirect, special, |
||||||
|
incidental, or consequential damages of any character arising as a |
||||||
|
result of this License or out of the use or inability to use the |
||||||
|
Work (including but not limited to damages for loss of goodwill, |
||||||
|
work stoppage, computer failure or malfunction, or any and all |
||||||
|
other commercial damages or losses), even if such Contributor |
||||||
|
has been advised of the possibility of such damages. |
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing |
||||||
|
the Work or Derivative Works thereof, You may choose to offer, |
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity, |
||||||
|
or other liability obligations and/or rights consistent with this |
||||||
|
License. However, in accepting such obligations, You may act only |
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf |
||||||
|
of any other Contributor, and only if You agree to indemnify, |
||||||
|
defend, and hold each Contributor harmless for any liability |
||||||
|
incurred by, or claims asserted against, such Contributor by reason |
||||||
|
of your accepting any such warranty or additional liability. |
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS |
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work. |
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following |
||||||
|
boilerplate notice, with the fields enclosed by brackets "{}" |
||||||
|
replaced with your own identifying information. (Don't include |
||||||
|
the brackets!) The text should be enclosed in the appropriate |
||||||
|
comment syntax for the file format. We also recommend that a |
||||||
|
file or class name and description of purpose be included on the |
||||||
|
same "printed page" as the copyright notice for easier |
||||||
|
identification within third-party archives. |
||||||
|
|
||||||
|
Copyright {yyyy} {name of copyright owner} |
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License"); |
||||||
|
you may not use this file except in compliance with the License. |
||||||
|
You may obtain a copy of the License at |
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0 |
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software |
||||||
|
distributed under the License is distributed on an "AS IS" BASIS, |
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||||
|
See the License for the specific language governing permissions and |
||||||
|
limitations under the License. |
||||||
|
|
@ -0,0 +1,247 @@ |
|||||||
|
package digestset |
||||||
|
|
||||||
|
import ( |
||||||
|
"errors" |
||||||
|
"sort" |
||||||
|
"strings" |
||||||
|
"sync" |
||||||
|
|
||||||
|
digest "github.com/opencontainers/go-digest" |
||||||
|
) |
||||||
|
|
||||||
|
var (
	// ErrDigestNotFound is returned when no digest in the set matches
	// the queried string.
	ErrDigestNotFound = errors.New("digest not found")

	// ErrDigestAmbiguous is returned when multiple digests in the set
	// match a query; none of the matching digests should be considered
	// valid matches.
	ErrDigestAmbiguous = errors.New("ambiguous digest string")
)
||||||
|
|
||||||
|
// Set holds a unique set of digests which may be referenced both by the
// full string representation of the digest and by a short representation
// (prefix). The uniqueness of a short representation is judged only
// against the other digests in the set: if digests are omitted from this
// set, collisions in a larger set may not be detected, therefore it is
// important to always do short-representation lookups on the complete set
// of digests. To mitigate collisions, an appropriately long short code
// should be used.
type Set struct {
	mutex   sync.RWMutex  // guards entries for concurrent readers and writers
	entries digestEntries // kept sorted by (val, alg) for binary search
}
||||||
|
|
||||||
|
// NewSet creates an empty set of digests
|
||||||
|
// which may have digests added.
|
||||||
|
func NewSet() *Set { |
||||||
|
return &Set{ |
||||||
|
entries: digestEntries{}, |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// checkShortMatch checks whether two digests match as either whole
|
||||||
|
// values or short values. This function does not test equality,
|
||||||
|
// rather whether the second value could match against the first
|
||||||
|
// value.
|
||||||
|
func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool { |
||||||
|
if len(hex) == len(shortHex) { |
||||||
|
if hex != shortHex { |
||||||
|
return false |
||||||
|
} |
||||||
|
if len(shortAlg) > 0 && string(alg) != shortAlg { |
||||||
|
return false |
||||||
|
} |
||||||
|
} else if !strings.HasPrefix(hex, shortHex) { |
||||||
|
return false |
||||||
|
} else if len(shortAlg) > 0 && string(alg) != shortAlg { |
||||||
|
return false |
||||||
|
} |
||||||
|
return true |
||||||
|
} |
||||||
|
|
||||||
|
// Lookup looks for a digest matching the given string representation,
// which may be a full digest or a short (possibly algorithm-less) form.
// If no digests could be found ErrDigestNotFound will be returned with an
// empty digest value. If multiple matches are found ErrDigestAmbiguous
// will be returned with an empty digest value.
func (dst *Set) Lookup(d string) (digest.Digest, error) {
	dst.mutex.RLock()
	defer dst.mutex.RUnlock()
	if len(dst.entries) == 0 {
		return "", ErrDigestNotFound
	}
	var (
		searchFunc func(int) bool
		alg        digest.Algorithm
		hex        string
	)
	dgst, err := digest.Parse(d)
	if err == digest.ErrDigestInvalidFormat {
		// Not a full digest: treat d as a (possibly short) hex value
		// with no algorithm constraint.
		hex = d
		searchFunc = func(i int) bool {
			return dst.entries[i].val >= d
		}
	} else {
		// Full digest: constrain the search by both hex value and
		// algorithm (entries are sorted by val, then alg).
		hex = dgst.Hex()
		alg = dgst.Algorithm()
		searchFunc = func(i int) bool {
			if dst.entries[i].val == hex {
				return dst.entries[i].alg >= alg
			}
			return dst.entries[i].val >= hex
		}
	}
	// Binary-search for the first candidate, then verify it actually
	// matches (sort.Search returns len(entries) when nothing does).
	idx := sort.Search(len(dst.entries), searchFunc)
	if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) {
		return "", ErrDigestNotFound
	}
	// An exact (alg, hex) match is never ambiguous.
	if dst.entries[idx].alg == alg && dst.entries[idx].val == hex {
		return dst.entries[idx].digest, nil
	}
	// A short match is ambiguous when the next sorted entry matches too.
	if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) {
		return "", ErrDigestAmbiguous
	}

	return dst.entries[idx].digest, nil
}
||||||
|
|
||||||
|
// Add adds the given digest to the set, keeping entries sorted by
// (val, alg). An error will be returned if the given digest is invalid.
// If the digest already exists in the set, this operation will be a
// no-op.
func (dst *Set) Add(d digest.Digest) error {
	if err := d.Validate(); err != nil {
		return err
	}
	dst.mutex.Lock()
	defer dst.mutex.Unlock()
	entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
	// Binary-search for the sorted insertion point.
	searchFunc := func(i int) bool {
		if dst.entries[i].val == entry.val {
			return dst.entries[i].alg >= entry.alg
		}
		return dst.entries[i].val >= entry.val
	}
	idx := sort.Search(len(dst.entries), searchFunc)
	if idx == len(dst.entries) {
		// Sorts after every existing entry: append at the end.
		dst.entries = append(dst.entries, entry)
		return nil
	} else if dst.entries[idx].digest == d {
		// Already present: nothing to do.
		return nil
	}

	// Grow by one, shift the tail right, and insert at idx.
	entries := append(dst.entries, nil)
	copy(entries[idx+1:], entries[idx:len(entries)-1])
	entries[idx] = entry
	dst.entries = entries
	return nil
}
||||||
|
|
||||||
|
// Remove removes the given digest from the set. An error will be
// returned if the given digest is invalid. If the digest does not exist
// in the set, this operation will be a no-op.
func (dst *Set) Remove(d digest.Digest) error {
	if err := d.Validate(); err != nil {
		return err
	}
	dst.mutex.Lock()
	defer dst.mutex.Unlock()
	entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
	// Binary-search for where d would live in the sorted entries.
	searchFunc := func(i int) bool {
		if dst.entries[i].val == entry.val {
			return dst.entries[i].alg >= entry.alg
		}
		return dst.entries[i].val >= entry.val
	}
	idx := sort.Search(len(dst.entries), searchFunc)
	// Not found if idx is after the end or the value at idx is not the digest.
	if idx == len(dst.entries) || dst.entries[idx].digest != d {
		return nil
	}

	// Shift the tail left over idx and trim the now-unused final slot.
	entries := dst.entries
	copy(entries[idx:], entries[idx+1:])
	entries = entries[:len(entries)-1]
	dst.entries = entries

	return nil
}
||||||
|
|
||||||
|
// All returns all the digests in the set
|
||||||
|
func (dst *Set) All() []digest.Digest { |
||||||
|
dst.mutex.RLock() |
||||||
|
defer dst.mutex.RUnlock() |
||||||
|
retValues := make([]digest.Digest, len(dst.entries)) |
||||||
|
for i := range dst.entries { |
||||||
|
retValues[i] = dst.entries[i].digest |
||||||
|
} |
||||||
|
|
||||||
|
return retValues |
||||||
|
} |
||||||
|
|
||||||
|
// ShortCodeTable returns a map of Digest to unique short codes. The
// length represents the minimum value; the maximum length may be the
// entire value of the digest if uniqueness cannot be achieved without
// the full value. This function will attempt to make short codes as
// short as possible while remaining unique within the set.
func ShortCodeTable(dst *Set, length int) map[digest.Digest]string {
	dst.mutex.RLock()
	defer dst.mutex.RUnlock()
	m := make(map[digest.Digest]string, len(dst.entries))
	l := length       // current candidate prefix length
	resetIdx := 0     // index of the last entry known to collide at the current length
	for i := 0; i < len(dst.entries); i++ {
		var short string
		extended := true
		for extended {
			extended = false
			if len(dst.entries[i].val) <= l {
				// The prefix would cover the whole hex value; fall back
				// to the full digest string, which is unique by definition.
				short = dst.entries[i].digest.String()
			} else {
				short = dst.entries[i].val[:l]
				// Entries are sorted, so any entry sharing this prefix is
				// adjacent; scan forward until the prefix stops matching.
				for j := i + 1; j < len(dst.entries); j++ {
					if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) {
						if j > resetIdx {
							resetIdx = j
						}
						extended = true
					} else {
						break
					}
				}
				if extended {
					// Collision found: lengthen the prefix and retry.
					l++
				}
			}
		}
		m[dst.entries[i].digest] = short
		// Once past the last colliding entry, shrink back to the
		// requested minimum length.
		if i >= resetIdx {
			l = length
		}
	}
	return m
}
||||||
|
|
||||||
|
// digestEntry caches the parsed pieces of a digest so lookups can compare
// the hex value and algorithm without re-parsing.
type digestEntry struct {
	alg    digest.Algorithm // algorithm component of the digest
	val    string           // hex component of the digest
	digest digest.Digest    // the full original digest
}
||||||
|
|
||||||
|
// digestEntries is a list of digest entries kept sorted by (val, alg);
// it implements sort.Interface.
type digestEntries []*digestEntry

func (d digestEntries) Len() int {
	return len(d)
}

// Less orders primarily by hex value, breaking ties by algorithm.
func (d digestEntries) Less(i, j int) bool {
	if d[i].val != d[j].val {
		return d[i].val < d[j].val
	}
	return d[i].alg < d[j].alg
}

func (d digestEntries) Swap(i, j int) {
	d[i], d[j] = d[j], d[i]
}
@ -0,0 +1,42 @@ |
|||||||
|
package reference |
||||||
|
|
||||||
|
import "path" |
||||||
|
|
||||||
|
// IsNameOnly returns true if reference only contains a repo name.
|
||||||
|
func IsNameOnly(ref Named) bool { |
||||||
|
if _, ok := ref.(NamedTagged); ok { |
||||||
|
return false |
||||||
|
} |
||||||
|
if _, ok := ref.(Canonical); ok { |
||||||
|
return false |
||||||
|
} |
||||||
|
return true |
||||||
|
} |
||||||
|
|
||||||
|
// FamiliarName returns the familiar name string
|
||||||
|
// for the given named, familiarizing if needed.
|
||||||
|
func FamiliarName(ref Named) string { |
||||||
|
if nn, ok := ref.(normalizedNamed); ok { |
||||||
|
return nn.Familiar().Name() |
||||||
|
} |
||||||
|
return ref.Name() |
||||||
|
} |
||||||
|
|
||||||
|
// FamiliarString returns the familiar string representation
|
||||||
|
// for the given reference, familiarizing if needed.
|
||||||
|
func FamiliarString(ref Reference) string { |
||||||
|
if nn, ok := ref.(normalizedNamed); ok { |
||||||
|
return nn.Familiar().String() |
||||||
|
} |
||||||
|
return ref.String() |
||||||
|
} |
||||||
|
|
||||||
|
// FamiliarMatch reports whether ref matches the specified pattern.
|
||||||
|
// See https://godoc.org/path#Match for supported patterns.
|
||||||
|
func FamiliarMatch(pattern string, ref Reference) (bool, error) { |
||||||
|
matched, err := path.Match(pattern, FamiliarString(ref)) |
||||||
|
if namedRef, isNamed := ref.(Named); isNamed && !matched { |
||||||
|
matched, _ = path.Match(pattern, FamiliarName(namedRef)) |
||||||
|
} |
||||||
|
return matched, err |
||||||
|
} |
@ -0,0 +1,170 @@ |
|||||||
|
package reference |
||||||
|
|
||||||
|
import ( |
||||||
|
"errors" |
||||||
|
"fmt" |
||||||
|
"strings" |
||||||
|
|
||||||
|
"github.com/docker/distribution/digestset" |
||||||
|
"github.com/opencontainers/go-digest" |
||||||
|
) |
||||||
|
|
||||||
|
var (
	// legacyDefaultDomain is the historical default registry host,
	// rewritten to defaultDomain during normalization.
	legacyDefaultDomain = "index.docker.io"
	// defaultDomain is the registry assumed when a name has no domain.
	defaultDomain = "docker.io"
	// officialRepoName is the path prefix used by single-component
	// ("official") repositories, e.g. "library/ubuntu".
	officialRepoName = "library"
	// defaultTag is the tag applied by TagNameOnly when none is present.
	defaultTag = "latest"
)
||||||
|
|
||||||
|
// normalizedNamed represents a name which has been normalized and has a
// familiar form. A familiar name is what is used in the Docker UI. An
// example normalized name is "docker.io/library/ubuntu" with the
// corresponding familiar name "ubuntu".
type normalizedNamed interface {
	Named
	// Familiar returns the familiar (UI-shortened) form of the name.
	Familiar() Named
}
||||||
|
|
||||||
|
// ParseNormalizedNamed parses a string into a named reference,
// transforming a familiar name from the Docker UI into a fully
// qualified reference. If the value may be an identifier, use
// ParseAnyReference instead.
func ParseNormalizedNamed(s string) (Named, error) {
	// A bare 64-hex-character string is an image ID, not a repository name.
	if ok := anchoredIdentifierRegexp.MatchString(s); ok {
		return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
	}
	domain, remainder := splitDockerDomain(s)
	// Isolate the repository path (strip any ":tag" suffix) to enforce
	// the lowercase requirement on the name only.
	var remoteName string
	if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
		remoteName = remainder[:tagSep]
	} else {
		remoteName = remainder
	}
	if strings.ToLower(remoteName) != remoteName {
		return nil, errors.New("invalid reference format: repository name must be lowercase")
	}

	// Re-parse the fully qualified form to validate it.
	ref, err := Parse(domain + "/" + remainder)
	if err != nil {
		return nil, err
	}
	named, isNamed := ref.(Named)
	if !isNamed {
		return nil, fmt.Errorf("reference %s has no name", ref.String())
	}
	return named, nil
}
||||||
|
|
||||||
|
// splitDockerDomain splits a repository name to domain and remotename string.
|
||||||
|
// If no valid domain is found, the default domain is used. Repository name
|
||||||
|
// needs to be already validated before.
|
||||||
|
func splitDockerDomain(name string) (domain, remainder string) { |
||||||
|
i := strings.IndexRune(name, '/') |
||||||
|
if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") { |
||||||
|
domain, remainder = defaultDomain, name |
||||||
|
} else { |
||||||
|
domain, remainder = name[:i], name[i+1:] |
||||||
|
} |
||||||
|
if domain == legacyDefaultDomain { |
||||||
|
domain = defaultDomain |
||||||
|
} |
||||||
|
if domain == defaultDomain && !strings.ContainsRune(remainder, '/') { |
||||||
|
remainder = officialRepoName + "/" + remainder |
||||||
|
} |
||||||
|
return |
||||||
|
} |
||||||
|
|
||||||
|
// familiarizeName returns a shortened version of the name familiar to
// the Docker UI. Familiar names have the default domain "docker.io" and
// the "library/" repository prefix removed. For example,
// "docker.io/library/redis" will have the familiar name "redis" and
// "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
// Returns a familiarized named-only reference.
func familiarizeName(named namedRepository) repository {
	repo := repository{
		domain: named.Domain(),
		path:   named.Path(),
	}

	if repo.domain == defaultDomain {
		repo.domain = ""
		// Handle official repositories which have the pattern
		// "library/<official repo name>".
		if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName {
			repo.path = split[1]
		}
	}
	return repo
}
||||||
|
|
||||||
|
// Familiar returns the reference with its name familiarized; tag and
// digest are carried over unchanged.
func (r reference) Familiar() Named {
	return reference{
		namedRepository: familiarizeName(r.namedRepository),
		tag:             r.tag,
		digest:          r.digest,
	}
}

// Familiar returns the repository with its name familiarized.
func (r repository) Familiar() Named {
	return familiarizeName(r)
}

// Familiar returns the tagged reference with its name familiarized; the
// tag is carried over unchanged.
func (t taggedReference) Familiar() Named {
	return taggedReference{
		namedRepository: familiarizeName(t.namedRepository),
		tag:             t.tag,
	}
}

// Familiar returns the canonical reference with its name familiarized;
// the digest is carried over unchanged.
func (c canonicalReference) Familiar() Named {
	return canonicalReference{
		namedRepository: familiarizeName(c.namedRepository),
		digest:          c.digest,
	}
}
||||||
|
|
||||||
|
// TagNameOnly adds the default tag "latest" to a reference if it only
// has a repo name; otherwise ref is returned unchanged.
func TagNameOnly(ref Named) Named {
	if IsNameOnly(ref) {
		namedTagged, err := WithTag(ref, defaultTag)
		if err != nil {
			// The default tag must be valid; to create a NamedTagged
			// type with non-validated input the WithTag function
			// should be used instead.
			panic(err)
		}
		return namedTagged
	}
	return ref
}
||||||
|
|
||||||
|
// ParseAnyReference parses a reference string as, in order: a bare
// 64-hex identifier (promoted to a "sha256:" digest reference), a full
// digest, or a familiar name.
func ParseAnyReference(ref string) (Reference, error) {
	if ok := anchoredIdentifierRegexp.MatchString(ref); ok {
		return digestReference("sha256:" + ref), nil
	}
	if dgst, err := digest.Parse(ref); err == nil {
		return digestReference(dgst), nil
	}

	// Fall back to treating ref as a (possibly familiar) name.
	return ParseNormalizedNamed(ref)
}
||||||
|
|
||||||
|
// ParseAnyReferenceWithSet parses a reference string as a possible short
|
||||||
|
// identifier to be matched in a digest set, a full digest, or familiar name.
|
||||||
|
func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) { |
||||||
|
if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok { |
||||||
|
dgst, err := ds.Lookup(ref) |
||||||
|
if err == nil { |
||||||
|
return digestReference(dgst), nil |
||||||
|
} |
||||||
|
} else { |
||||||
|
if dgst, err := digest.Parse(ref); err == nil { |
||||||
|
return digestReference(dgst), nil |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
return ParseNormalizedNamed(ref) |
||||||
|
} |
@ -0,0 +1,433 @@ |
|||||||
|
// Package reference provides a general type to represent any way of referencing images within the registry.
|
||||||
|
// Its main purpose is to abstract tags and digests (content-addressable hash).
|
||||||
|
//
|
||||||
|
// Grammar
|
||||||
|
//
|
||||||
|
// reference := name [ ":" tag ] [ "@" digest ]
|
||||||
|
// name := [domain '/'] path-component ['/' path-component]*
|
||||||
|
// domain := domain-component ['.' domain-component]* [':' port-number]
|
||||||
|
// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
|
||||||
|
// port-number := /[0-9]+/
|
||||||
|
// path-component := alpha-numeric [separator alpha-numeric]*
|
||||||
|
// alpha-numeric := /[a-z0-9]+/
|
||||||
|
// separator := /[_.]|__|[-]*/
|
||||||
|
//
|
||||||
|
// tag := /[\w][\w.-]{0,127}/
|
||||||
|
//
|
||||||
|
// digest := digest-algorithm ":" digest-hex
|
||||||
|
// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]*
|
||||||
|
// digest-algorithm-separator := /[+.-_]/
|
||||||
|
// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/
|
||||||
|
// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
|
||||||
|
//
|
||||||
|
// identifier := /[a-f0-9]{64}/
|
||||||
|
// short-identifier := /[a-f0-9]{6,64}/
|
||||||
|
package reference |
||||||
|
|
||||||
|
import ( |
||||||
|
"errors" |
||||||
|
"fmt" |
||||||
|
"strings" |
||||||
|
|
||||||
|
"github.com/opencontainers/go-digest" |
||||||
|
) |
||||||
|
|
||||||
|
const ( |
||||||
|
// NameTotalLengthMax is the maximum total number of characters in a repository name.
|
||||||
|
NameTotalLengthMax = 255 |
||||||
|
) |
||||||
|
|
||||||
|
var (
	// ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
	ErrReferenceInvalidFormat = errors.New("invalid reference format")

	// ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
	ErrTagInvalidFormat = errors.New("invalid tag format")

	// ErrDigestInvalidFormat represents an error while trying to parse a string as a digest.
	ErrDigestInvalidFormat = errors.New("invalid digest format")

	// ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
	ErrNameContainsUppercase = errors.New("repository name must be lowercase")

	// ErrNameEmpty is returned for empty, invalid repository names.
	ErrNameEmpty = errors.New("repository name must have at least one component")

	// ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
	ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)

	// ErrNameNotCanonical is returned when a name is not canonical.
	ErrNameNotCanonical = errors.New("repository name must be canonical")
)
||||||
|
|
||||||
|
// Reference is an opaque object reference identifier that may include
// modifiers such as a hostname, name, tag, and digest.
type Reference interface {
	// String returns the full reference.
	String() string
}
||||||
|
|
||||||
|
// Field provides a wrapper type for resolving correct reference types when
// working with encoding. It implements encoding.TextMarshaler and
// encoding.TextUnmarshaler.
type Field struct {
	reference Reference
}

// AsField wraps a reference in a Field for encoding.
func AsField(reference Reference) Field {
	return Field{reference}
}

// Reference unwraps the reference type from the field to
// return the Reference object. This object should be
// of the appropriate type to further check for different
// reference types.
func (f Field) Reference() Reference {
	return f.reference
}

// MarshalText serializes the field to byte text which
// is the string of the reference.
func (f Field) MarshalText() (p []byte, err error) {
	return []byte(f.reference.String()), nil
}

// UnmarshalText parses text bytes by invoking the
// reference parser to ensure the appropriately
// typed reference object is wrapped by field.
func (f *Field) UnmarshalText(p []byte) error {
	r, err := Parse(string(p))
	if err != nil {
		return err
	}

	f.reference = r
	return nil
}
||||||
|
|
||||||
|
// Named is an object with a full name.
type Named interface {
	Reference
	// Name returns the full repository name, including any domain part.
	Name() string
}

// Tagged is an object which has a tag.
type Tagged interface {
	Reference
	// Tag returns the tag component.
	Tag() string
}

// NamedTagged is an object including a name and tag.
type NamedTagged interface {
	Named
	// Tag returns the tag component.
	Tag() string
}

// Digested is an object which has a digest
// in which it can be referenced by.
type Digested interface {
	Reference
	// Digest returns the digest component.
	Digest() digest.Digest
}

// Canonical reference is an object with a fully unique
// name including a name with domain and digest.
type Canonical interface {
	Named
	// Digest returns the digest component.
	Digest() digest.Digest
}

// namedRepository is a reference to a repository with a name.
// A namedRepository has both domain and path components.
type namedRepository interface {
	Named
	// Domain returns the registry domain component (may be empty).
	Domain() string
	// Path returns the repository path component.
	Path() string
}
||||||
|
|
||||||
|
// Domain returns the domain part of the Named reference
|
||||||
|
func Domain(named Named) string { |
||||||
|
if r, ok := named.(namedRepository); ok { |
||||||
|
return r.Domain() |
||||||
|
} |
||||||
|
domain, _ := splitDomain(named.Name()) |
||||||
|
return domain |
||||||
|
} |
||||||
|
|
||||||
|
// Path returns the name without the domain part of the Named reference
|
||||||
|
func Path(named Named) (name string) { |
||||||
|
if r, ok := named.(namedRepository); ok { |
||||||
|
return r.Path() |
||||||
|
} |
||||||
|
_, path := splitDomain(named.Name()) |
||||||
|
return path |
||||||
|
} |
||||||
|
|
||||||
|
func splitDomain(name string) (string, string) { |
||||||
|
match := anchoredNameRegexp.FindStringSubmatch(name) |
||||||
|
if len(match) != 3 { |
||||||
|
return "", name |
||||||
|
} |
||||||
|
return match[1], match[2] |
||||||
|
} |
||||||
|
|
||||||
|
// SplitHostname splits a named reference into a
|
||||||
|
// hostname and name string. If no valid hostname is
|
||||||
|
// found, the hostname is empty and the full value
|
||||||
|
// is returned as name
|
||||||
|
// DEPRECATED: Use Domain or Path
|
||||||
|
func SplitHostname(named Named) (string, string) { |
||||||
|
if r, ok := named.(namedRepository); ok { |
||||||
|
return r.Domain(), r.Path() |
||||||
|
} |
||||||
|
return splitDomain(named.Name()) |
||||||
|
} |
||||||
|
|
||||||
|
// Parse parses s and returns a syntactically valid Reference.
// If an error was encountered it is returned, along with a nil Reference.
// NOTE: Parse will not handle short digests.
func Parse(s string) (Reference, error) {
	matches := ReferenceRegexp.FindStringSubmatch(s)
	if matches == nil {
		if s == "" {
			return nil, ErrNameEmpty
		}
		// If the lowercased input does parse, the only problem was
		// uppercase characters in the name — report that specifically.
		if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil {
			return nil, ErrNameContainsUppercase
		}
		return nil, ErrReferenceInvalidFormat
	}

	// matches[1] is the name component (ReferenceRegexp's first capture group).
	if len(matches[1]) > NameTotalLengthMax {
		return nil, ErrNameTooLong
	}

	var repo repository

	// Split the name into domain and path; when it does not match the
	// anchored name grammar, treat the whole name as the path.
	nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1])
	if nameMatch != nil && len(nameMatch) == 3 {
		repo.domain = nameMatch[1]
		repo.path = nameMatch[2]
	} else {
		repo.domain = ""
		repo.path = matches[1]
	}

	// matches[2] is the (possibly empty) tag, matches[3] the digest.
	ref := reference{
		namedRepository: repo,
		tag:             matches[2],
	}
	if matches[3] != "" {
		var err error
		ref.digest, err = digest.Parse(matches[3])
		if err != nil {
			return nil, err
		}
	}

	// Narrow the fully populated reference to the most specific type.
	r := getBestReferenceType(ref)
	if r == nil {
		return nil, ErrNameEmpty
	}

	return r, nil
}
||||||
|
|
||||||
|
// ParseNamed parses s and returns a syntactically valid reference implementing
|
||||||
|
// the Named interface. The reference must have a name and be in the canonical
|
||||||
|
// form, otherwise an error is returned.
|
||||||
|
// If an error was encountered it is returned, along with a nil Reference.
|
||||||
|
// NOTE: ParseNamed will not handle short digests.
|
||||||
|
func ParseNamed(s string) (Named, error) { |
||||||
|
named, err := ParseNormalizedNamed(s) |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
if named.String() != s { |
||||||
|
return nil, ErrNameNotCanonical |
||||||
|
} |
||||||
|
return named, nil |
||||||
|
} |
||||||
|
|
||||||
|
// WithName returns a named object representing the given string. If the input
|
||||||
|
// is invalid ErrReferenceInvalidFormat will be returned.
|
||||||
|
func WithName(name string) (Named, error) { |
||||||
|
if len(name) > NameTotalLengthMax { |
||||||
|
return nil, ErrNameTooLong |
||||||
|
} |
||||||
|
|
||||||
|
match := anchoredNameRegexp.FindStringSubmatch(name) |
||||||
|
if match == nil || len(match) != 3 { |
||||||
|
return nil, ErrReferenceInvalidFormat |
||||||
|
} |
||||||
|
return repository{ |
||||||
|
domain: match[1], |
||||||
|
path: match[2], |
||||||
|
}, nil |
||||||
|
} |
||||||
|
|
||||||
|
// WithTag combines the name from "name" and the tag from "tag" to form a
|
||||||
|
// reference incorporating both the name and the tag.
|
||||||
|
func WithTag(name Named, tag string) (NamedTagged, error) { |
||||||
|
if !anchoredTagRegexp.MatchString(tag) { |
||||||
|
return nil, ErrTagInvalidFormat |
||||||
|
} |
||||||
|
var repo repository |
||||||
|
if r, ok := name.(namedRepository); ok { |
||||||
|
repo.domain = r.Domain() |
||||||
|
repo.path = r.Path() |
||||||
|
} else { |
||||||
|
repo.path = name.Name() |
||||||
|
} |
||||||
|
if canonical, ok := name.(Canonical); ok { |
||||||
|
return reference{ |
||||||
|
namedRepository: repo, |
||||||
|
tag: tag, |
||||||
|
digest: canonical.Digest(), |
||||||
|
}, nil |
||||||
|
} |
||||||
|
return taggedReference{ |
||||||
|
namedRepository: repo, |
||||||
|
tag: tag, |
||||||
|
}, nil |
||||||
|
} |
||||||
|
|
||||||
|
// WithDigest combines the name from "name" and the digest from "digest" to form
|
||||||
|
// a reference incorporating both the name and the digest.
|
||||||
|
func WithDigest(name Named, digest digest.Digest) (Canonical, error) { |
||||||
|
if !anchoredDigestRegexp.MatchString(digest.String()) { |
||||||
|
return nil, ErrDigestInvalidFormat |
||||||
|
} |
||||||
|
var repo repository |
||||||
|
if r, ok := name.(namedRepository); ok { |
||||||
|
repo.domain = r.Domain() |
||||||
|
repo.path = r.Path() |
||||||
|
} else { |
||||||
|
repo.path = name.Name() |
||||||
|
} |
||||||
|
if tagged, ok := name.(Tagged); ok { |
||||||
|
return reference{ |
||||||
|
namedRepository: repo, |
||||||
|
tag: tagged.Tag(), |
||||||
|
digest: digest, |
||||||
|
}, nil |
||||||
|
} |
||||||
|
return canonicalReference{ |
||||||
|
namedRepository: repo, |
||||||
|
digest: digest, |
||||||
|
}, nil |
||||||
|
} |
||||||
|
|
||||||
|
// TrimNamed removes any tag or digest from the named reference.
|
||||||
|
func TrimNamed(ref Named) Named { |
||||||
|
domain, path := SplitHostname(ref) |
||||||
|
return repository{ |
||||||
|
domain: domain, |
||||||
|
path: path, |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// getBestReferenceType narrows a fully parsed reference to the most
// specific type that still carries all of its components:
//
//	digest only          -> digestReference
//	name only            -> the namedRepository itself
//	name + digest        -> canonicalReference
//	name + tag           -> taggedReference
//	name + tag + digest  -> reference (unchanged)
//
// It returns nil when the reference has neither a name nor a digest.
func getBestReferenceType(ref reference) Reference {
	if ref.Name() == "" {
		// Allow digest only references
		if ref.digest != "" {
			return digestReference(ref.digest)
		}
		return nil
	}
	if ref.tag == "" {
		if ref.digest != "" {
			return canonicalReference{
				namedRepository: ref.namedRepository,
				digest:          ref.digest,
			}
		}
		return ref.namedRepository
	}
	if ref.digest == "" {
		return taggedReference{
			namedRepository: ref.namedRepository,
			tag:             ref.tag,
		}
	}

	return ref
}
||||||
|
|
||||||
|
// reference carries all three possible components: a named repository,
// a tag, and a digest. It is used directly only when all are present;
// getBestReferenceType otherwise narrows to a more specific type.
type reference struct {
	namedRepository
	tag    string
	digest digest.Digest
}

// String renders the full name:tag@digest form.
func (r reference) String() string {
	return r.Name() + ":" + r.tag + "@" + r.digest.String()
}

// Tag returns the tag component.
func (r reference) Tag() string {
	return r.tag
}

// Digest returns the digest component.
func (r reference) Digest() digest.Digest {
	return r.digest
}
||||||
|
|
||||||
|
// repository is the concrete Named implementation for a registry
// repository, holding an optional domain and the repository path.
type repository struct {
	domain string
	path   string
}

// String returns the full repository name.
func (r repository) String() string {
	return r.Name()
}

// Name joins domain and path with a slash; a repository without a
// domain is named by its path alone.
func (r repository) Name() string {
	if r.domain != "" {
		return r.domain + "/" + r.path
	}
	return r.path
}

// Domain returns the registry domain component (possibly empty).
func (r repository) Domain() string {
	return r.domain
}

// Path returns the repository path component.
func (r repository) Path() string {
	return r.path
}
||||||
|
|
||||||
|
// digestReference represents a reference that consists of a digest only
// (no repository name or tag).
type digestReference digest.Digest

// String returns the digest in its canonical string form.
func (d digestReference) String() string {
	return digest.Digest(d).String()
}

// Digest returns the underlying digest value.
func (d digestReference) Digest() digest.Digest {
	return digest.Digest(d)
}
||||||
|
|
||||||
|
type taggedReference struct { |
||||||
|
namedRepository |
||||||
|
tag string |
||||||
|
} |
||||||
|
|
||||||
|
func (t taggedReference) String() string { |
||||||
|
return t.Name() + ":" + t.tag |
||||||
|
} |
||||||
|
|
||||||
|
func (t taggedReference) Tag() string { |
||||||
|
return t.tag |
||||||
|
} |
||||||
|
|
||||||
|
type canonicalReference struct { |
||||||
|
namedRepository |
||||||
|
digest digest.Digest |
||||||
|
} |
||||||
|
|
||||||
|
func (c canonicalReference) String() string { |
||||||
|
return c.Name() + "@" + c.digest.String() |
||||||
|
} |
||||||
|
|
||||||
|
func (c canonicalReference) Digest() digest.Digest { |
||||||
|
return c.digest |
||||||
|
} |
@ -0,0 +1,143 @@ |
|||||||
|
package reference |
||||||
|
|
||||||
|
import "regexp" |
||||||
|
|
||||||
|
var (
	// alphaNumericRegexp defines the alpha numeric atom, typically a
	// component of names. This only allows lower case characters and digits.
	alphaNumericRegexp = match(`[a-z0-9]+`)

	// separatorRegexp defines the separators allowed to be embedded in name
	// components. This allows one period, one or two underscores and
	// multiple dashes.
	separatorRegexp = match(`(?:[._]|__|[-]*)`)

	// nameComponentRegexp restricts registry path component names to start
	// with at least one letter or number, with following parts able to be
	// separated by one period, one or two underscores and multiple dashes.
	nameComponentRegexp = expression(
		alphaNumericRegexp,
		optional(repeated(separatorRegexp, alphaNumericRegexp)))

	// domainComponentRegexp restricts the registry domain component of a
	// repository name to start with a component as defined by DomainRegexp
	// and followed by an optional port.
	domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)

	// DomainRegexp defines the structure of potential domain components
	// that may be part of image names. This is purposely a subset of what is
	// allowed by DNS to ensure backwards compatibility with Docker image
	// names.
	DomainRegexp = expression(
		domainComponentRegexp,
		optional(repeated(literal(`.`), domainComponentRegexp)),
		optional(literal(`:`), match(`[0-9]+`)))

	// TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
	TagRegexp = match(`[\w][\w.-]{0,127}`)

	// anchoredTagRegexp matches valid tag names, anchored at the start and
	// end of the matched string.
	anchoredTagRegexp = anchored(TagRegexp)

	// DigestRegexp matches valid digests (algorithm:hex with at least a
	// 128-bit hex value).
	DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)

	// anchoredDigestRegexp matches valid digests, anchored at the start and
	// end of the matched string.
	anchoredDigestRegexp = anchored(DigestRegexp)

	// NameRegexp is the format for the name component of references. The
	// regexp has capturing groups for the domain and name part omitting
	// the separating forward slash from either.
	NameRegexp = expression(
		optional(DomainRegexp, literal(`/`)),
		nameComponentRegexp,
		optional(repeated(literal(`/`), nameComponentRegexp)))

	// anchoredNameRegexp is used to parse a name value, capturing the
	// domain and trailing components.
	anchoredNameRegexp = anchored(
		optional(capture(DomainRegexp), literal(`/`)),
		capture(nameComponentRegexp,
			optional(repeated(literal(`/`), nameComponentRegexp))))

	// ReferenceRegexp is the full supported format of a reference. The regexp
	// is anchored and has capturing groups for name, tag, and digest
	// components.
	ReferenceRegexp = anchored(capture(NameRegexp),
		optional(literal(":"), capture(TagRegexp)),
		optional(literal("@"), capture(DigestRegexp)))

	// IdentifierRegexp is the format for a string identifier used as a
	// content addressable identifier using sha256. These identifiers
	// are like digests without the algorithm, since sha256 is used.
	IdentifierRegexp = match(`([a-f0-9]{64})`)

	// ShortIdentifierRegexp is the format used to represent a prefix
	// of an identifier. A prefix may be used to match a sha256 identifier
	// within a list of trusted identifiers.
	ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`)

	// anchoredIdentifierRegexp is used to check or match an
	// identifier value, anchored at start and end of string.
	anchoredIdentifierRegexp = anchored(IdentifierRegexp)

	// anchoredShortIdentifierRegexp is used to check if a value
	// is a possible identifier prefix, anchored at start and end
	// of string.
	anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp)
)
||||||
|
|
||||||
|
// match compiles the string to a regular expression.
// It is an alias for regexp.MustCompile so the grammar builders below
// read tersely; it panics on an invalid pattern (package init only).
var match = regexp.MustCompile
||||||
|
|
||||||
|
// literal compiles s into a literal regular expression, escaping any regexp
|
||||||
|
// reserved characters.
|
||||||
|
func literal(s string) *regexp.Regexp { |
||||||
|
re := match(regexp.QuoteMeta(s)) |
||||||
|
|
||||||
|
if _, complete := re.LiteralPrefix(); !complete { |
||||||
|
panic("must be a literal") |
||||||
|
} |
||||||
|
|
||||||
|
return re |
||||||
|
} |
||||||
|
|
||||||
|
// expression defines a full expression, where each regular expression must
|
||||||
|
// follow the previous.
|
||||||
|
func expression(res ...*regexp.Regexp) *regexp.Regexp { |
||||||
|
var s string |
||||||
|
for _, re := range res { |
||||||
|
s += re.String() |
||||||
|
} |
||||||
|
|
||||||
|
return match(s) |
||||||
|
} |
||||||
|
|
||||||
|
// optional wraps the expression in a non-capturing group and makes the
|
||||||
|
// production optional.
|
||||||
|
func optional(res ...*regexp.Regexp) *regexp.Regexp { |
||||||
|
return match(group(expression(res...)).String() + `?`) |
||||||
|
} |
||||||
|
|
||||||
|
// repeated wraps the regexp in a non-capturing group to get one or more
|
||||||
|
// matches.
|
||||||
|
func repeated(res ...*regexp.Regexp) *regexp.Regexp { |
||||||
|
return match(group(expression(res...)).String() + `+`) |
||||||
|
} |
||||||
|
|
||||||
|
// group wraps the regexp in a non-capturing group.
|
||||||
|
func group(res ...*regexp.Regexp) *regexp.Regexp { |
||||||
|
return match(`(?:` + expression(res...).String() + `)`) |
||||||
|
} |
||||||
|
|
||||||
|
// capture wraps the expression in a capturing group.
|
||||||
|
func capture(res ...*regexp.Regexp) *regexp.Regexp { |
||||||
|
return match(`(` + expression(res...).String() + `)`) |
||||||
|
} |
||||||
|
|
||||||
|
// anchored anchors the regular expression by adding start and end delimiters.
|
||||||
|
func anchored(res ...*regexp.Regexp) *regexp.Regexp { |
||||||
|
return match(`^` + expression(res...).String() + `$`) |
||||||
|
} |
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,191 @@ |
|||||||
|
|
||||||
|
Apache License |
||||||
|
Version 2.0, January 2004 |
||||||
|
https://www.apache.org/licenses/ |
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION |
||||||
|
|
||||||
|
1. Definitions. |
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction, |
||||||
|
and distribution as defined by Sections 1 through 9 of this document. |
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by |
||||||
|
the copyright owner that is granting the License. |
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all |
||||||
|
other entities that control, are controlled by, or are under common |
||||||
|
control with that entity. For the purposes of this definition, |
||||||
|
"control" means (i) the power, direct or indirect, to cause the |
||||||
|
direction or management of such entity, whether by contract or |
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the |
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity. |
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity |
||||||
|
exercising permissions granted by this License. |
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications, |
||||||
|
including but not limited to software source code, documentation |
||||||
|
source, and configuration files. |
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical |
||||||
|
transformation or translation of a Source form, including but |
||||||
|
not limited to compiled object code, generated documentation, |
||||||
|
and conversions to other media types. |
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or |
||||||
|
Object form, made available under the License, as indicated by a |
||||||
|
copyright notice that is included in or attached to the work |
||||||
|
(an example is provided in the Appendix below). |
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object |
||||||
|
form, that is based on (or derived from) the Work and for which the |
||||||
|
editorial revisions, annotations, elaborations, or other modifications |
||||||
|
represent, as a whole, an original work of authorship. For the purposes |
||||||
|
of this License, Derivative Works shall not include works that remain |
||||||
|
separable from, or merely link (or bind by name) to the interfaces of, |
||||||
|
the Work and Derivative Works thereof. |
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including |
||||||
|
the original version of the Work and any modifications or additions |
||||||
|
to that Work or Derivative Works thereof, that is intentionally |
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner |
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of |
||||||
|
the copyright owner. For the purposes of this definition, "submitted" |
||||||
|
means any form of electronic, verbal, or written communication sent |
||||||
|
to the Licensor or its representatives, including but not limited to |
||||||
|
communication on electronic mailing lists, source code control systems, |
||||||
|
and issue tracking systems that are managed by, or on behalf of, the |
||||||
|
Licensor for the purpose of discussing and improving the Work, but |
||||||
|
excluding communication that is conspicuously marked or otherwise |
||||||
|
designated in writing by the copyright owner as "Not a Contribution." |
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity |
||||||
|
on behalf of whom a Contribution has been received by Licensor and |
||||||
|
subsequently incorporated within the Work. |
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of |
||||||
|
this License, each Contributor hereby grants to You a perpetual, |
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
||||||
|
copyright license to reproduce, prepare Derivative Works of, |
||||||
|
publicly display, publicly perform, sublicense, and distribute the |
||||||
|
Work and such Derivative Works in Source or Object form. |
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of |
||||||
|
this License, each Contributor hereby grants to You a perpetual, |
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
||||||
|
(except as stated in this section) patent license to make, have made, |
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work, |
||||||
|
where such license applies only to those patent claims licensable |
||||||
|
by such Contributor that are necessarily infringed by their |
||||||
|
Contribution(s) alone or by combination of their Contribution(s) |
||||||
|
with the Work to which such Contribution(s) was submitted. If You |
||||||
|
institute patent litigation against any entity (including a |
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work |
||||||
|
or a Contribution incorporated within the Work constitutes direct |
||||||
|
or contributory patent infringement, then any patent licenses |
||||||
|
granted to You under this License for that Work shall terminate |
||||||
|
as of the date such litigation is filed. |
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the |
||||||
|
Work or Derivative Works thereof in any medium, with or without |
||||||
|
modifications, and in Source or Object form, provided that You |
||||||
|
meet the following conditions: |
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or |
||||||
|
Derivative Works a copy of this License; and |
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices |
||||||
|
stating that You changed the files; and |
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works |
||||||
|
that You distribute, all copyright, patent, trademark, and |
||||||
|
attribution notices from the Source form of the Work, |
||||||
|
excluding those notices that do not pertain to any part of |
||||||
|
the Derivative Works; and |
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its |
||||||
|
distribution, then any Derivative Works that You distribute must |
||||||
|
include a readable copy of the attribution notices contained |
||||||
|
within such NOTICE file, excluding those notices that do not |
||||||
|
pertain to any part of the Derivative Works, in at least one |
||||||
|
of the following places: within a NOTICE text file distributed |
||||||
|
as part of the Derivative Works; within the Source form or |
||||||
|
documentation, if provided along with the Derivative Works; or, |
||||||
|
within a display generated by the Derivative Works, if and |
||||||
|
wherever such third-party notices normally appear. The contents |
||||||
|
of the NOTICE file are for informational purposes only and |
||||||
|
do not modify the License. You may add Your own attribution |
||||||
|
notices within Derivative Works that You distribute, alongside |
||||||
|
or as an addendum to the NOTICE text from the Work, provided |
||||||
|
that such additional attribution notices cannot be construed |
||||||
|
as modifying the License. |
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and |
||||||
|
may provide additional or different license terms and conditions |
||||||
|
for use, reproduction, or distribution of Your modifications, or |
||||||
|
for any such Derivative Works as a whole, provided Your use, |
||||||
|
reproduction, and distribution of the Work otherwise complies with |
||||||
|
the conditions stated in this License. |
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise, |
||||||
|
any Contribution intentionally submitted for inclusion in the Work |
||||||
|
by You to the Licensor shall be under the terms and conditions of |
||||||
|
this License, without any additional terms or conditions. |
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify |
||||||
|
the terms of any separate license agreement you may have executed |
||||||
|
with Licensor regarding such Contributions. |
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade |
||||||
|
names, trademarks, service marks, or product names of the Licensor, |
||||||
|
except as required for reasonable and customary use in describing the |
||||||
|
origin of the Work and reproducing the content of the NOTICE file. |
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or |
||||||
|
agreed to in writing, Licensor provides the Work (and each |
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS, |
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or |
||||||
|
implied, including, without limitation, any warranties or conditions |
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A |
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the |
||||||
|
appropriateness of using or redistributing the Work and assume any |
||||||
|
risks associated with Your exercise of permissions under this License. |
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory, |
||||||
|
whether in tort (including negligence), contract, or otherwise, |
||||||
|
unless required by applicable law (such as deliberate and grossly |
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be |
||||||
|
liable to You for damages, including any direct, indirect, special, |
||||||
|
incidental, or consequential damages of any character arising as a |
||||||
|
result of this License or out of the use or inability to use the |
||||||
|
Work (including but not limited to damages for loss of goodwill, |
||||||
|
work stoppage, computer failure or malfunction, or any and all |
||||||
|
other commercial damages or losses), even if such Contributor |
||||||
|
has been advised of the possibility of such damages. |
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing |
||||||
|
the Work or Derivative Works thereof, You may choose to offer, |
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity, |
||||||
|
or other liability obligations and/or rights consistent with this |
||||||
|
License. However, in accepting such obligations, You may act only |
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf |
||||||
|
of any other Contributor, and only if You agree to indemnify, |
||||||
|
defend, and hold each Contributor harmless for any liability |
||||||
|
incurred by, or claims asserted against, such Contributor by reason |
||||||
|
of your accepting any such warranty or additional liability. |
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS |
||||||
|
|
||||||
|
Copyright 2013-2016 Docker, Inc. |
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License"); |
||||||
|
you may not use this file except in compliance with the License. |
||||||
|
You may obtain a copy of the License at |
||||||
|
|
||||||
|
https://www.apache.org/licenses/LICENSE-2.0 |
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software |
||||||
|
distributed under the License is distributed on an "AS IS" BASIS, |
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||||
|
See the License for the specific language governing permissions and |
||||||
|
limitations under the License. |
@ -0,0 +1,19 @@ |
|||||||
|
Docker |
||||||
|
Copyright 2012-2016 Docker, Inc. |
||||||
|
|
||||||
|
This product includes software developed at Docker, Inc. (https://www.docker.com). |
||||||
|
|
||||||
|
This product contains software (https://github.com/kr/pty) developed |
||||||
|
by Keith Rarick, licensed under the MIT License. |
||||||
|
|
||||||
|
The following is courtesy of our legal counsel: |
||||||
|
|
||||||
|
|
||||||
|
Use and transfer of Docker may be subject to certain restrictions by the |
||||||
|
United States and other governments. |
||||||
|
It is your responsibility to ensure that your use and/or transfer does not |
||||||
|
violate applicable laws. |
||||||
|
|
||||||
|
For more information, please see https://www.bis.doc.gov |
||||||
|
|
||||||
|
See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. |
@ -0,0 +1,22 @@ |
|||||||
|
package types |
||||||
|
|
||||||
|
// AuthConfig contains authorization information for connecting to a Registry
|
||||||
|
type AuthConfig struct { |
||||||
|
Username string `json:"username,omitempty"` |
||||||
|
Password string `json:"password,omitempty"` |
||||||
|
Auth string `json:"auth,omitempty"` |
||||||
|
|
||||||
|
// Email is an optional value associated with the username.
|
||||||
|
// This field is deprecated and will be removed in a later
|
||||||
|
// version of docker.
|
||||||
|
Email string `json:"email,omitempty"` |
||||||
|
|
||||||
|
ServerAddress string `json:"serveraddress,omitempty"` |
||||||
|
|
||||||
|
// IdentityToken is used to authenticate the user and get
|
||||||
|
// an access token for the registry.
|
||||||
|
IdentityToken string `json:"identitytoken,omitempty"` |
||||||
|
|
||||||
|
// RegistryToken is a bearer token to be sent to a registry
|
||||||
|
RegistryToken string `json:"registrytoken,omitempty"` |
||||||
|
} |
@ -0,0 +1,23 @@ |
|||||||
|
package blkiodev |
||||||
|
|
||||||
|
import "fmt" |
||||||
|
|
||||||
|
// WeightDevice is a structure that holds device:weight pair
|
||||||
|
type WeightDevice struct { |
||||||
|
Path string |
||||||
|
Weight uint16 |
||||||
|
} |
||||||
|
|
||||||
|
func (w *WeightDevice) String() string { |
||||||
|
return fmt.Sprintf("%s:%d", w.Path, w.Weight) |
||||||
|
} |
||||||
|
|
||||||
|
// ThrottleDevice is a structure that holds device:rate_per_second pair
|
||||||
|
type ThrottleDevice struct { |
||||||
|
Path string |
||||||
|
Rate uint64 |
||||||
|
} |
||||||
|
|
||||||
|
func (t *ThrottleDevice) String() string { |
||||||
|
return fmt.Sprintf("%s:%d", t.Path, t.Rate) |
||||||
|
} |
@ -0,0 +1,378 @@ |
|||||||
|
package types |
||||||
|
|
||||||
|
import ( |
||||||
|
"bufio" |
||||||
|
"io" |
||||||
|
"net" |
||||||
|
"os" |
||||||
|
|
||||||
|
"github.com/docker/docker/api/types/container" |
||||||
|
"github.com/docker/docker/api/types/filters" |
||||||
|
"github.com/docker/go-units" |
||||||
|
) |
||||||
|
|
||||||
|
// CheckpointCreateOptions holds parameters to create a checkpoint from a container
|
||||||
|
type CheckpointCreateOptions struct { |
||||||
|
CheckpointID string |
||||||
|
CheckpointDir string |
||||||
|
Exit bool |
||||||
|
} |
||||||
|
|
||||||
|
// CheckpointListOptions holds parameters to list checkpoints for a container
|
||||||
|
type CheckpointListOptions struct { |
||||||
|
CheckpointDir string |
||||||
|
} |
||||||
|
|
||||||
|
// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container
|
||||||
|
type CheckpointDeleteOptions struct { |
||||||
|
CheckpointID string |
||||||
|
CheckpointDir string |
||||||
|
} |
||||||
|
|
||||||
|
// ContainerAttachOptions holds parameters to attach to a container.
|
||||||
|
type ContainerAttachOptions struct { |
||||||
|
Stream bool |
||||||
|
Stdin bool |
||||||
|
Stdout bool |
||||||
|
Stderr bool |
||||||
|
DetachKeys string |
||||||
|
Logs bool |
||||||
|
} |
||||||
|
|
||||||
|
// ContainerCommitOptions holds parameters to commit changes into a container.
|
||||||
|
type ContainerCommitOptions struct { |
||||||
|
Reference string |
||||||
|
Comment string |
||||||
|
Author string |
||||||
|
Changes []string |
||||||
|
Pause bool |
||||||
|
Config *container.Config |
||||||
|
} |
||||||
|
|
||||||
|
// ContainerExecInspect holds information returned by exec inspect.
|
||||||
|
type ContainerExecInspect struct { |
||||||
|
ExecID string |
||||||
|
ContainerID string |
||||||
|
Running bool |
||||||
|
ExitCode int |
||||||
|
Pid int |
||||||
|
} |
||||||
|
|
||||||
|
// ContainerListOptions holds parameters to list containers with.
|
||||||
|
type ContainerListOptions struct { |
||||||
|
Quiet bool |
||||||
|
Size bool |
||||||
|
All bool |
||||||
|
Latest bool |
||||||
|
Since string |
||||||
|
Before string |
||||||
|
Limit int |
||||||
|
Filters filters.Args |
||||||
|
} |
||||||
|
|
||||||
|
// ContainerLogsOptions holds parameters to filter logs with.
|
||||||
|
type ContainerLogsOptions struct { |
||||||
|
ShowStdout bool |
||||||
|
ShowStderr bool |
||||||
|
Since string |
||||||
|
Timestamps bool |
||||||
|
Follow bool |
||||||
|
Tail string |
||||||
|
Details bool |
||||||
|
} |
||||||
|
|
||||||
|
// ContainerRemoveOptions holds parameters to remove containers.
|
||||||
|
type ContainerRemoveOptions struct { |
||||||
|
RemoveVolumes bool |
||||||
|
RemoveLinks bool |
||||||
|
Force bool |
||||||
|
} |
||||||
|
|
||||||
|
// ContainerStartOptions holds parameters to start containers.
|
||||||
|
type ContainerStartOptions struct { |
||||||
|
CheckpointID string |
||||||
|
CheckpointDir string |
||||||
|
} |
||||||
|
|
||||||
|
// CopyToContainerOptions holds information
|
||||||
|
// about files to copy into a container
|
||||||
|
type CopyToContainerOptions struct { |
||||||
|
AllowOverwriteDirWithFile bool |
||||||
|
} |
||||||
|
|
||||||
|
// EventsOptions holds parameters to filter events with.
|
||||||
|
type EventsOptions struct { |
||||||
|
Since string |
||||||
|
Until string |
||||||
|
Filters filters.Args |
||||||
|
} |
||||||
|
|
||||||
|
// NetworkListOptions holds parameters to filter the list of networks with.
|
||||||
|
type NetworkListOptions struct { |
||||||
|
Filters filters.Args |
||||||
|
} |
||||||
|
|
||||||
|
// HijackedResponse holds connection information for a hijacked request.
|
||||||
|
type HijackedResponse struct { |
||||||
|
Conn net.Conn |
||||||
|
Reader *bufio.Reader |
||||||
|
} |
||||||
|
|
||||||
|
// Close closes the hijacked connection and reader.
|
||||||
|
func (h *HijackedResponse) Close() { |
||||||
|
h.Conn.Close() |
||||||
|
} |
||||||
|
|
||||||
|
// CloseWriter is an interface that implements structs
|
||||||
|
// that close input streams to prevent from writing.
|
||||||
|
type CloseWriter interface { |
||||||
|
CloseWrite() error |
||||||
|
} |
||||||
|
|
||||||
|
// CloseWrite closes a readWriter for writing.
|
||||||
|
func (h *HijackedResponse) CloseWrite() error { |
||||||
|
if conn, ok := h.Conn.(CloseWriter); ok { |
||||||
|
return conn.CloseWrite() |
||||||
|
} |
||||||
|
return nil |
||||||
|
} |
||||||
|
|
||||||
|
// ImageBuildOptions holds the information
|
||||||
|
// necessary to build images.
|
||||||
|
type ImageBuildOptions struct { |
||||||
|
Tags []string |
||||||
|
SuppressOutput bool |
||||||
|
RemoteContext string |
||||||
|
NoCache bool |
||||||
|
Remove bool |
||||||
|
ForceRemove bool |
||||||
|
PullParent bool |
||||||
|
Isolation container.Isolation |
||||||
|
CPUSetCPUs string |
||||||
|
CPUSetMems string |
||||||
|
CPUShares int64 |
||||||
|
CPUQuota int64 |
||||||
|
CPUPeriod int64 |
||||||
|
Memory int64 |
||||||
|
MemorySwap int64 |
||||||
|
CgroupParent string |
||||||
|
NetworkMode string |
||||||
|
ShmSize int64 |
||||||
|
Dockerfile string |
||||||
|
Ulimits []*units.Ulimit |
||||||
|
// See the parsing of buildArgs in api/server/router/build/build_routes.go
|
||||||
|
// for an explaination of why BuildArgs needs to use *string instead of
|
||||||
|
// just a string
|
||||||
|
BuildArgs map[string]*string |
||||||
|
AuthConfigs map[string]AuthConfig |
||||||
|
Context io.Reader |
||||||
|
Labels map[string]string |
||||||
|
// squash the resulting image's layers to the parent
|
||||||
|
// preserves the original image and creates a new one from the parent with all
|
||||||
|
// the changes applied to a single layer
|
||||||
|
Squash bool |
||||||
|
// CacheFrom specifies images that are used for matching cache. Images
|
||||||
|
// specified here do not need to have a valid parent chain to match cache.
|
||||||
|
CacheFrom []string |
||||||
|
SecurityOpt []string |
||||||
|
} |
||||||
|
|
||||||
|
// ImageBuildResponse holds information
|
||||||
|
// returned by a server after building
|
||||||
|
// an image.
|
||||||
|
type ImageBuildResponse struct { |
||||||
|
Body io.ReadCloser |
||||||
|
OSType string |
||||||
|
} |
||||||
|
|
||||||
|
// ImageCreateOptions holds information to create images.
|
||||||
|
type ImageCreateOptions struct { |
||||||
|
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
|
||||||
|
} |
||||||
|
|
||||||
|
// ImageImportSource holds source information for ImageImport
|
||||||
|
type ImageImportSource struct { |
||||||
|
Source io.Reader // Source is the data to send to the server to create this image from (mutually exclusive with SourceName)
|
||||||
|
SourceName string // SourceName is the name of the image to pull (mutually exclusive with Source)
|
||||||
|
} |
||||||
|
|
||||||
|
// ImageImportOptions holds information to import images from the client host.
|
||||||
|
type ImageImportOptions struct { |
||||||
|
Tag string // Tag is the name to tag this image with. This attribute is deprecated.
|
||||||
|
Message string // Message is the message to tag the image with
|
||||||
|
Changes []string // Changes are the raw changes to apply to this image
|
||||||
|
} |
||||||
|
|
||||||
|
// ImageListOptions holds parameters to filter the list of images with.
|
||||||
|
type ImageListOptions struct { |
||||||
|
All bool |
||||||
|
Filters filters.Args |
||||||
|
} |
||||||
|
|
||||||
|
// ImageLoadResponse returns information to the client about a load process.
|
||||||
|
type ImageLoadResponse struct { |
||||||
|
// Body must be closed to avoid a resource leak
|
||||||
|
Body io.ReadCloser |
||||||
|
JSON bool |
||||||
|
} |
||||||
|
|
||||||
|
// ImagePullOptions holds information to pull images.
|
||||||
|
type ImagePullOptions struct { |
||||||
|
All bool |
||||||
|
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
|
||||||
|
PrivilegeFunc RequestPrivilegeFunc |
||||||
|
} |
||||||
|
|
||||||
|
// RequestPrivilegeFunc is a function interface that
|
||||||
|
// clients can supply to retry operations after
|
||||||
|
// getting an authorization error.
|
||||||
|
// This function returns the registry authentication
|
||||||
|
// header value in base 64 format, or an error
|
||||||
|
// if the privilege request fails.
|
||||||
|
type RequestPrivilegeFunc func() (string, error) |
||||||
|
|
||||||
|
//ImagePushOptions holds information to push images.
|
||||||
|
type ImagePushOptions ImagePullOptions |
||||||
|
|
||||||
|
// ImageRemoveOptions holds parameters to remove images.
|
||||||
|
type ImageRemoveOptions struct { |
||||||
|
Force bool |
||||||
|
PruneChildren bool |
||||||
|
} |
||||||
|
|
||||||
|
// ImageSearchOptions holds parameters to search images with.
|
||||||
|
type ImageSearchOptions struct { |
||||||
|
RegistryAuth string |
||||||
|
PrivilegeFunc RequestPrivilegeFunc |
||||||
|
Filters filters.Args |
||||||
|
Limit int |
||||||
|
} |
||||||
|
|
||||||
|
// ResizeOptions holds parameters to resize a tty.
|
||||||
|
// It can be used to resize container ttys and
|
||||||
|
// exec process ttys too.
|
||||||
|
type ResizeOptions struct { |
||||||
|
Height uint |
||||||
|
Width uint |
||||||
|
} |
||||||
|
|
||||||
|
// VersionResponse holds version information for the client and the server
|
||||||
|
type VersionResponse struct { |
||||||
|
Client *Version |
||||||
|
Server *Version |
||||||
|
} |
||||||
|
|
||||||
|
// ServerOK returns true when the client could connect to the docker server
|
||||||
|
// and parse the information received. It returns false otherwise.
|
||||||
|
func (v VersionResponse) ServerOK() bool { |
||||||
|
return v.Server != nil |
||||||
|
} |
||||||
|
|
||||||
|
// NodeListOptions holds parameters to list nodes with.
|
||||||
|
type NodeListOptions struct { |
||||||
|
Filters filters.Args |
||||||
|
} |
||||||
|
|
||||||
|
// NodeRemoveOptions holds parameters to remove nodes with.
|
||||||
|
type NodeRemoveOptions struct { |
||||||
|
Force bool |
||||||
|
} |
||||||
|
|
||||||
|
// ServiceCreateOptions contains the options to use when creating a service.
|
||||||
|
type ServiceCreateOptions struct { |
||||||
|
// EncodedRegistryAuth is the encoded registry authorization credentials to
|
||||||
|
// use when updating the service.
|
||||||
|
//
|
||||||
|
// This field follows the format of the X-Registry-Auth header.
|
||||||
|
EncodedRegistryAuth string |
||||||
|
} |
||||||
|
|
||||||
|
// ServiceCreateResponse contains the information returned to a client
|
||||||
|
// on the creation of a new service.
|
||||||
|
type ServiceCreateResponse struct { |
||||||
|
// ID is the ID of the created service.
|
||||||
|
ID string |
||||||
|
// Warnings is a set of non-fatal warning messages to pass on to the user.
|
||||||
|
Warnings []string `json:",omitempty"` |
||||||
|
} |
||||||
|
|
||||||
|
// Values for RegistryAuthFrom in ServiceUpdateOptions
|
||||||
|
const ( |
||||||
|
RegistryAuthFromSpec = "spec" |
||||||
|
RegistryAuthFromPreviousSpec = "previous-spec" |
||||||
|
) |
||||||
|
|
||||||
|
// ServiceUpdateOptions contains the options to be used for updating services.
|
||||||
|
type ServiceUpdateOptions struct { |
||||||
|
// EncodedRegistryAuth is the encoded registry authorization credentials to
|
||||||
|
// use when updating the service.
|
||||||
|
//
|
||||||
|
// This field follows the format of the X-Registry-Auth header.
|
||||||
|
EncodedRegistryAuth string |
||||||
|
|
||||||
|
// TODO(stevvooe): Consider moving the version parameter of ServiceUpdate
|
||||||
|
// into this field. While it does open API users up to racy writes, most
|
||||||
|
// users may not need that level of consistency in practice.
|
||||||
|
|
||||||
|
// RegistryAuthFrom specifies where to find the registry authorization
|
||||||
|
// credentials if they are not given in EncodedRegistryAuth. Valid
|
||||||
|
// values are "spec" and "previous-spec".
|
||||||
|
RegistryAuthFrom string |
||||||
|
} |
||||||
|
|
||||||
|
// ServiceListOptions holds parameters to list services with.
|
||||||
|
type ServiceListOptions struct { |
||||||
|
Filters filters.Args |
||||||
|
} |
||||||
|
|
||||||
|
// TaskListOptions holds parameters to list tasks with.
|
||||||
|
type TaskListOptions struct { |
||||||
|
Filters filters.Args |
||||||
|
} |
||||||
|
|
||||||
|
// PluginRemoveOptions holds parameters to remove plugins.
|
||||||
|
type PluginRemoveOptions struct { |
||||||
|
Force bool |
||||||
|
} |
||||||
|
|
||||||
|
// PluginEnableOptions holds parameters to enable plugins.
|
||||||
|
type PluginEnableOptions struct { |
||||||
|
Timeout int |
||||||
|
} |
||||||
|
|
||||||
|
// PluginDisableOptions holds parameters to disable plugins.
|
||||||
|
type PluginDisableOptions struct { |
||||||
|
Force bool |
||||||
|
} |
||||||
|
|
||||||
|
// PluginInstallOptions holds parameters to install a plugin.
|
||||||
|
type PluginInstallOptions struct { |
||||||
|
Disabled bool |
||||||
|
AcceptAllPermissions bool |
||||||
|
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
|
||||||
|
RemoteRef string // RemoteRef is the plugin name on the registry
|
||||||
|
PrivilegeFunc RequestPrivilegeFunc |
||||||
|
AcceptPermissionsFunc func(PluginPrivileges) (bool, error) |
||||||
|
Args []string |
||||||
|
} |
||||||
|
|
||||||
|
// SecretRequestOption is a type for requesting secrets
|
||||||
|
type SecretRequestOption struct { |
||||||
|
Source string |
||||||
|
Target string |
||||||
|
UID string |
||||||
|
GID string |
||||||
|
Mode os.FileMode |
||||||
|
} |
||||||
|
|
||||||
|
// SwarmUnlockKeyResponse contains the response for Engine API:
|
||||||
|
// GET /swarm/unlockkey
|
||||||
|
type SwarmUnlockKeyResponse struct { |
||||||
|
// UnlockKey is the unlock key in ASCII-armored format.
|
||||||
|
UnlockKey string |
||||||
|
} |
||||||
|
|
||||||
|
// PluginCreateOptions hold all options to plugin create.
|
||||||
|
type PluginCreateOptions struct { |
||||||
|
RepoName string |
||||||
|
} |
@ -0,0 +1,69 @@ |
|||||||
|
package types |
||||||
|
|
||||||
|
import ( |
||||||
|
"github.com/docker/docker/api/types/container" |
||||||
|
"github.com/docker/docker/api/types/network" |
||||||
|
) |
||||||
|
|
||||||
|
// configs holds structs used for internal communication between the
|
||||||
|
// frontend (such as an http server) and the backend (such as the
|
||||||
|
// docker daemon).
|
||||||
|
|
||||||
|
// ContainerCreateConfig is the parameter set to ContainerCreate()
|
||||||
|
type ContainerCreateConfig struct { |
||||||
|
Name string |
||||||
|
Config *container.Config |
||||||
|
HostConfig *container.HostConfig |
||||||
|
NetworkingConfig *network.NetworkingConfig |
||||||
|
AdjustCPUShares bool |
||||||
|
} |
||||||
|
|
||||||
|
// ContainerRmConfig holds arguments for the container remove
|
||||||
|
// operation. This struct is used to tell the backend what operations
|
||||||
|
// to perform.
|
||||||
|
type ContainerRmConfig struct { |
||||||
|
ForceRemove, RemoveVolume, RemoveLink bool |
||||||
|
} |
||||||
|
|
||||||
|
// ContainerCommitConfig contains build configs for commit operation,
|
||||||
|
// and is used when making a commit with the current state of the container.
|
||||||
|
type ContainerCommitConfig struct { |
||||||
|
Pause bool |
||||||
|
Repo string |
||||||
|
Tag string |
||||||
|
Author string |
||||||
|
Comment string |
||||||
|
// merge container config into commit config before commit
|
||||||
|
MergeConfigs bool |
||||||
|
Config *container.Config |
||||||
|
} |
||||||
|
|
||||||
|
// ExecConfig is a small subset of the Config struct that holds the configuration
|
||||||
|
// for the exec feature of docker.
|
||||||
|
type ExecConfig struct { |
||||||
|
User string // User that will run the command
|
||||||
|
Privileged bool // Is the container in privileged mode
|
||||||
|
Tty bool // Attach standard streams to a tty.
|
||||||
|
AttachStdin bool // Attach the standard input, makes possible user interaction
|
||||||
|
AttachStderr bool // Attach the standard error
|
||||||
|
AttachStdout bool // Attach the standard output
|
||||||
|
Detach bool // Execute in detach mode
|
||||||
|
DetachKeys string // Escape keys for detach
|
||||||
|
Env []string // Environment variables
|
||||||
|
Cmd []string // Execution commands and args
|
||||||
|
} |
||||||
|
|
||||||
|
// PluginRmConfig holds arguments for plugin remove.
|
||||||
|
type PluginRmConfig struct { |
||||||
|
ForceRemove bool |
||||||
|
} |
||||||
|
|
||||||
|
// PluginEnableConfig holds arguments for plugin enable
|
||||||
|
type PluginEnableConfig struct { |
||||||
|
Timeout int |
||||||
|
} |
||||||
|
|
||||||
|
// PluginDisableConfig holds arguments for plugin disable.
|
||||||
|
type PluginDisableConfig struct { |
||||||
|
ForceDisable bool |
||||||
|
} |
@ -0,0 +1,62 @@ |
|||||||
|
package container |
||||||
|
|
||||||
|
import ( |
||||||
|
"time" |
||||||
|
|
||||||
|
"github.com/docker/docker/api/types/strslice" |
||||||
|
"github.com/docker/go-connections/nat" |
||||||
|
) |
||||||
|
|
||||||
|
// HealthConfig holds configuration settings for the HEALTHCHECK feature.
|
||||||
|
type HealthConfig struct { |
||||||
|
// Test is the test to perform to check that the container is healthy.
|
||||||
|
// An empty slice means to inherit the default.
|
||||||
|
// The options are:
|
||||||
|
// {} : inherit healthcheck
|
||||||
|
// {"NONE"} : disable healthcheck
|
||||||
|
// {"CMD", args...} : exec arguments directly
|
||||||
|
// {"CMD-SHELL", command} : run command with system's default shell
|
||||||
|
Test []string `json:",omitempty"` |
||||||
|
|
||||||
|
// Zero means to inherit. Durations are expressed as integer nanoseconds.
|
||||||
|
Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
|
||||||
|
Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
|
||||||
|
|
||||||
|
// Retries is the number of consecutive failures needed to consider a container as unhealthy.
|
||||||
|
// Zero means inherit.
|
||||||
|
Retries int `json:",omitempty"` |
||||||
|
} |
||||||
|
|
||||||
|
// Config contains the configuration data about a container.
|
||||||
|
// It should hold only portable information about the container.
|
||||||
|
// Here, "portable" means "independent from the host we are running on".
|
||||||
|
// Non-portable information *should* appear in HostConfig.
|
||||||
|
// All fields added to this struct must be marked `omitempty` to keep getting
|
||||||
|
// predictable hashes from the old `v1Compatibility` configuration.
|
||||||
|
type Config struct { |
||||||
|
Hostname string // Hostname
|
||||||
|
Domainname string // Domainname
|
||||||
|
User string // User that will run the command(s) inside the container, also support user:group
|
||||||
|
AttachStdin bool // Attach the standard input, makes possible user interaction
|
||||||
|
AttachStdout bool // Attach the standard output
|
||||||
|
AttachStderr bool // Attach the standard error
|
||||||
|
ExposedPorts nat.PortSet `json:",omitempty"` // List of exposed ports
|
||||||
|
Tty bool // Attach standard streams to a tty, including stdin if it is not closed.
|
||||||
|
OpenStdin bool // Open stdin
|
||||||
|
StdinOnce bool // If true, close stdin after the 1 attached client disconnects.
|
||||||
|
Env []string // List of environment variable to set in the container
|
||||||
|
Cmd strslice.StrSlice // Command to run when starting the container
|
||||||
|
Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
|
||||||
|
ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific)
|
||||||
|
Image string // Name of the image as it was passed by the operator (e.g. could be symbolic)
|
||||||
|
Volumes map[string]struct{} // List of volumes (mounts) used for the container
|
||||||
|
WorkingDir string // Current directory (PWD) in the command will be launched
|
||||||
|
Entrypoint strslice.StrSlice // Entrypoint to run when starting the container
|
||||||
|
NetworkDisabled bool `json:",omitempty"` // Is network disabled
|
||||||
|
MacAddress string `json:",omitempty"` // Mac Address of the container
|
||||||
|
OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile
|
||||||
|
Labels map[string]string // List of labels set to this container
|
||||||
|
StopSignal string `json:",omitempty"` // Signal to stop a container
|
||||||
|
StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container
|
||||||
|
Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
|
||||||
|
} |
@ -0,0 +1,21 @@ |
|||||||
|
package container |
||||||
|
|
||||||
|
// ----------------------------------------------------------------------------
|
||||||
|
// DO NOT EDIT THIS FILE
|
||||||
|
// This file was generated by `swagger generate operation`
|
||||||
|
//
|
||||||
|
// See hack/swagger-gen.sh
|
||||||
|
// ----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
// ContainerCreateCreatedBody container create created body
|
||||||
|
// swagger:model ContainerCreateCreatedBody
|
||||||
|
type ContainerCreateCreatedBody struct { |
||||||
|
|
||||||
|
// The ID of the created container
|
||||||
|
// Required: true
|
||||||
|
ID string `json:"Id"` |
||||||
|
|
||||||
|
// Warnings encountered when creating the container
|
||||||
|
// Required: true
|
||||||
|
Warnings []string `json:"Warnings"` |
||||||
|
} |
@ -0,0 +1,17 @@ |
|||||||
|
package container |
||||||
|
|
||||||
|
// ----------------------------------------------------------------------------
|
||||||
|
// DO NOT EDIT THIS FILE
|
||||||
|
// This file was generated by `swagger generate operation`
|
||||||
|
//
|
||||||
|
// See hack/swagger-gen.sh
|
||||||
|
// ----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
// ContainerUpdateOKBody container update o k body
|
||||||
|
// swagger:model ContainerUpdateOKBody
|
||||||
|
type ContainerUpdateOKBody struct { |
||||||
|
|
||||||
|
// warnings
|
||||||
|
// Required: true
|
||||||
|
Warnings []string `json:"Warnings"` |
||||||
|
} |
@ -0,0 +1,17 @@ |
|||||||
|
package container |
||||||
|
|
||||||
|
// ----------------------------------------------------------------------------
|
||||||
|
// DO NOT EDIT THIS FILE
|
||||||
|
// This file was generated by `swagger generate operation`
|
||||||
|
//
|
||||||
|
// See hack/swagger-gen.sh
|
||||||
|
// ----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
// ContainerWaitOKBody container wait o k body
|
||||||
|
// swagger:model ContainerWaitOKBody
|
||||||
|
type ContainerWaitOKBody struct { |
||||||
|
|
||||||
|
// Exit code of the container
|
||||||
|
// Required: true
|
||||||
|
StatusCode int64 `json:"StatusCode"` |
||||||
|
} |
@ -0,0 +1,333 @@ |
|||||||
|
package container |
||||||
|
|
||||||
|
import ( |
||||||
|
"strings" |
||||||
|
|
||||||
|
"github.com/docker/docker/api/types/blkiodev" |
||||||
|
"github.com/docker/docker/api/types/mount" |
||||||
|
"github.com/docker/docker/api/types/strslice" |
||||||
|
"github.com/docker/go-connections/nat" |
||||||
|
"github.com/docker/go-units" |
||||||
|
) |
||||||
|
|
||||||
|
// NetworkMode represents the container network stack.
|
||||||
|
type NetworkMode string |
||||||
|
|
||||||
|
// Isolation represents the isolation technology of a container. The supported
|
||||||
|
// values are platform specific
|
||||||
|
type Isolation string |
||||||
|
|
||||||
|
// IsDefault indicates the default isolation technology of a container. On Linux this
|
||||||
|
// is the native driver. On Windows, this is a Windows Server Container.
|
||||||
|
func (i Isolation) IsDefault() bool { |
||||||
|
return strings.ToLower(string(i)) == "default" || string(i) == "" |
||||||
|
} |
||||||
|
|
||||||
|
// IpcMode represents the container ipc stack.
|
||||||
|
type IpcMode string |
||||||
|
|
||||||
|
// IsPrivate indicates whether the container uses its private ipc stack.
|
||||||
|
func (n IpcMode) IsPrivate() bool { |
||||||
|
return !(n.IsHost() || n.IsContainer()) |
||||||
|
} |
||||||
|
|
||||||
|
// IsHost indicates whether the container uses the host's ipc stack.
|
||||||
|
func (n IpcMode) IsHost() bool { |
||||||
|
return n == "host" |
||||||
|
} |
||||||
|
|
||||||
|
// IsContainer indicates whether the container uses a container's ipc stack.
|
||||||
|
func (n IpcMode) IsContainer() bool { |
||||||
|
parts := strings.SplitN(string(n), ":", 2) |
||||||
|
return len(parts) > 1 && parts[0] == "container" |
||||||
|
} |
||||||
|
|
||||||
|
// Valid indicates whether the ipc stack is valid.
|
||||||
|
func (n IpcMode) Valid() bool { |
||||||
|
parts := strings.Split(string(n), ":") |
||||||
|
switch mode := parts[0]; mode { |
||||||
|
case "", "host": |
||||||
|
case "container": |
||||||
|
if len(parts) != 2 || parts[1] == "" { |
||||||
|
return false |
||||||
|
} |
||||||
|
default: |
||||||
|
return false |
||||||
|
} |
||||||
|
return true |
||||||
|
} |
||||||
|
|
||||||
|
// Container returns the name of the container ipc stack is going to be used.
|
||||||
|
func (n IpcMode) Container() string { |
||||||
|
parts := strings.SplitN(string(n), ":", 2) |
||||||
|
if len(parts) > 1 { |
||||||
|
return parts[1] |
||||||
|
} |
||||||
|
return "" |
||||||
|
} |
||||||
|
|
||||||
|
// UsernsMode represents userns mode in the container.
|
||||||
|
type UsernsMode string |
||||||
|
|
||||||
|
// IsHost indicates whether the container uses the host's userns.
|
||||||
|
func (n UsernsMode) IsHost() bool { |
||||||
|
return n == "host" |
||||||
|
} |
||||||
|
|
||||||
|
// IsPrivate indicates whether the container uses the a private userns.
|
||||||
|
func (n UsernsMode) IsPrivate() bool { |
||||||
|
return !(n.IsHost()) |
||||||
|
} |
||||||
|
|
||||||
|
// Valid indicates whether the userns is valid.
|
||||||
|
func (n UsernsMode) Valid() bool { |
||||||
|
parts := strings.Split(string(n), ":") |
||||||
|
switch mode := parts[0]; mode { |
||||||
|
case "", "host": |
||||||
|
default: |
||||||
|
return false |
||||||
|
} |
||||||
|
return true |
||||||
|
} |
||||||
|
|
||||||
|
// CgroupSpec represents the cgroup to use for the container.
|
||||||
|
type CgroupSpec string |
||||||
|
|
||||||
|
// IsContainer indicates whether the container is using another container cgroup
|
||||||
|
func (c CgroupSpec) IsContainer() bool { |
||||||
|
parts := strings.SplitN(string(c), ":", 2) |
||||||
|
return len(parts) > 1 && parts[0] == "container" |
||||||
|
} |
||||||
|
|
||||||
|
// Valid indicates whether the cgroup spec is valid.
|
||||||
|
func (c CgroupSpec) Valid() bool { |
||||||
|
return c.IsContainer() || c == "" |
||||||
|
} |
||||||
|
|
||||||
|
// Container returns the name of the container whose cgroup will be used.
|
||||||
|
func (c CgroupSpec) Container() string { |
||||||
|
parts := strings.SplitN(string(c), ":", 2) |
||||||
|
if len(parts) > 1 { |
||||||
|
return parts[1] |
||||||
|
} |
||||||
|
return "" |
||||||
|
} |
||||||
|
|
||||||
|
// UTSMode represents the UTS namespace of the container.
|
||||||
|
type UTSMode string |
||||||
|
|
||||||
|
// IsPrivate indicates whether the container uses its private UTS namespace.
|
||||||
|
func (n UTSMode) IsPrivate() bool { |
||||||
|
return !(n.IsHost()) |
||||||
|
} |
||||||
|
|
||||||
|
// IsHost indicates whether the container uses the host's UTS namespace.
|
||||||
|
func (n UTSMode) IsHost() bool { |
||||||
|
return n == "host" |
||||||
|
} |
||||||
|
|
||||||
|
// Valid indicates whether the UTS namespace is valid.
|
||||||
|
func (n UTSMode) Valid() bool { |
||||||
|
parts := strings.Split(string(n), ":") |
||||||
|
switch mode := parts[0]; mode { |
||||||
|
case "", "host": |
||||||
|
default: |
||||||
|
return false |
||||||
|
} |
||||||
|
return true |
||||||
|
} |
||||||
|
|
||||||
|
// PidMode represents the pid namespace of the container.
|
||||||
|
type PidMode string |
||||||
|
|
||||||
|
// IsPrivate indicates whether the container uses its own new pid namespace.
|
||||||
|
func (n PidMode) IsPrivate() bool { |
||||||
|
return !(n.IsHost() || n.IsContainer()) |
||||||
|
} |
||||||
|
|
||||||
|
// IsHost indicates whether the container uses the host's pid namespace.
|
||||||
|
func (n PidMode) IsHost() bool { |
||||||
|
return n == "host" |
||||||
|
} |
||||||
|
|
||||||
|
// IsContainer indicates whether the container uses a container's pid namespace.
|
||||||
|
func (n PidMode) IsContainer() bool { |
||||||
|
parts := strings.SplitN(string(n), ":", 2) |
||||||
|
return len(parts) > 1 && parts[0] == "container" |
||||||
|
} |
||||||
|
|
||||||
|
// Valid indicates whether the pid namespace is valid.
|
||||||
|
func (n PidMode) Valid() bool { |
||||||
|
parts := strings.Split(string(n), ":") |
||||||
|
switch mode := parts[0]; mode { |
||||||
|
case "", "host": |
||||||
|
case "container": |
||||||
|
if len(parts) != 2 || parts[1] == "" { |
||||||
|
return false |
||||||
|
} |
||||||
|
default: |
||||||
|
return false |
||||||
|
} |
||||||
|
return true |
||||||
|
} |
||||||
|
|
||||||
|
// Container returns the name of the container whose pid namespace is going to be used.
|
||||||
|
func (n PidMode) Container() string { |
||||||
|
parts := strings.SplitN(string(n), ":", 2) |
||||||
|
if len(parts) > 1 { |
||||||
|
return parts[1] |
||||||
|
} |
||||||
|
return "" |
||||||
|
} |
||||||
|
|
||||||
|
// DeviceMapping represents the device mapping between the host and the container.
|
||||||
|
type DeviceMapping struct { |
||||||
|
PathOnHost string |
||||||
|
PathInContainer string |
||||||
|
CgroupPermissions string |
||||||
|
} |
||||||
|
|
||||||
|
// RestartPolicy represents the restart policies of the container.
|
||||||
|
type RestartPolicy struct { |
||||||
|
Name string |
||||||
|
MaximumRetryCount int |
||||||
|
} |
||||||
|
|
||||||
|
// IsNone indicates whether the container has the "no" restart policy.
|
||||||
|
// This means the container will not automatically restart when exiting.
|
||||||
|
func (rp *RestartPolicy) IsNone() bool { |
||||||
|
return rp.Name == "no" || rp.Name == "" |
||||||
|
} |
||||||
|
|
||||||
|
// IsAlways indicates whether the container has the "always" restart policy.
|
||||||
|
// This means the container will automatically restart regardless of the exit status.
|
||||||
|
func (rp *RestartPolicy) IsAlways() bool { |
||||||
|
return rp.Name == "always" |
||||||
|
} |
||||||
|
|
||||||
|
// IsOnFailure indicates whether the container has the "on-failure" restart policy.
|
||||||
|
// This means the container will automatically restart of exiting with a non-zero exit status.
|
||||||
|
func (rp *RestartPolicy) IsOnFailure() bool { |
||||||
|
return rp.Name == "on-failure" |
||||||
|
} |
||||||
|
|
||||||
|
// IsUnlessStopped indicates whether the container has the
|
||||||
|
// "unless-stopped" restart policy. This means the container will
|
||||||
|
// automatically restart unless user has put it to stopped state.
|
||||||
|
func (rp *RestartPolicy) IsUnlessStopped() bool { |
||||||
|
return rp.Name == "unless-stopped" |
||||||
|
} |
||||||
|
|
||||||
|
// IsSame compares two RestartPolicy to see if they are the same
|
||||||
|
func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool { |
||||||
|
return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount |
||||||
|
} |
||||||
|
|
||||||
|
// LogConfig represents the logging configuration of the container.
|
||||||
|
type LogConfig struct { |
||||||
|
Type string |
||||||
|
Config map[string]string |
||||||
|
} |
||||||
|
|
||||||
|
// Resources contains container's resources (cgroups config, ulimits...)
|
||||||
|
type Resources struct { |
||||||
|
// Applicable to all platforms
|
||||||
|
CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers)
|
||||||
|
Memory int64 // Memory limit (in bytes)
|
||||||
|
NanoCPUs int64 `json:"NanoCpus"` // CPU quota in units of 10<sup>-9</sup> CPUs.
|
||||||
|
|
||||||
|
// Applicable to UNIX platforms
|
||||||
|
CgroupParent string // Parent cgroup.
|
||||||
|
BlkioWeight uint16 // Block IO weight (relative weight vs. other containers)
|
||||||
|
BlkioWeightDevice []*blkiodev.WeightDevice |
||||||
|
BlkioDeviceReadBps []*blkiodev.ThrottleDevice |
||||||
|
BlkioDeviceWriteBps []*blkiodev.ThrottleDevice |
||||||
|
BlkioDeviceReadIOps []*blkiodev.ThrottleDevice |
||||||
|
BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice |
||||||
|
CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period
|
||||||
|
CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota
|
||||||
|
CPURealtimePeriod int64 `json:"CpuRealtimePeriod"` // CPU real-time period
|
||||||
|
CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime"` // CPU real-time runtime
|
||||||
|
CpusetCpus string // CpusetCpus 0-2, 0,1
|
||||||
|
CpusetMems string // CpusetMems 0-2, 0,1
|
||||||
|
Devices []DeviceMapping // List of devices to map inside the container
|
||||||
|
DiskQuota int64 // Disk limit (in bytes)
|
||||||
|
KernelMemory int64 // Kernel memory limit (in bytes)
|
||||||
|
MemoryReservation int64 // Memory soft limit (in bytes)
|
||||||
|
MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap
|
||||||
|
MemorySwappiness *int64 // Tuning container memory swappiness behaviour
|
||||||
|
OomKillDisable *bool // Whether to disable OOM Killer or not
|
||||||
|
PidsLimit int64 // Setting pids limit for a container
|
||||||
|
Ulimits []*units.Ulimit // List of ulimits to be set in the container
|
||||||
|
|
||||||
|
// Applicable to Windows
|
||||||
|
CPUCount int64 `json:"CpuCount"` // CPU count
|
||||||
|
CPUPercent int64 `json:"CpuPercent"` // CPU percent
|
||||||
|
IOMaximumIOps uint64 // Maximum IOps for the container system drive
|
||||||
|
IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive
|
||||||
|
} |
||||||
|
|
||||||
|
// UpdateConfig holds the mutable attributes of a Container.
|
||||||
|
// Those attributes can be updated at runtime.
|
||||||
|
type UpdateConfig struct { |
||||||
|
// Contains container's resources (cgroups, ulimits)
|
||||||
|
Resources |
||||||
|
RestartPolicy RestartPolicy |
||||||
|
} |
||||||
|
|
||||||
|
// HostConfig the non-portable Config structure of a container.
|
||||||
|
// Here, "non-portable" means "dependent of the host we are running on".
|
||||||
|
// Portable information *should* appear in Config.
|
||||||
|
type HostConfig struct { |
||||||
|
// Applicable to all platforms
|
||||||
|
Binds []string // List of volume bindings for this container
|
||||||
|
ContainerIDFile string // File (path) where the containerId is written
|
||||||
|
LogConfig LogConfig // Configuration of the logs for this container
|
||||||
|
NetworkMode NetworkMode // Network mode to use for the container
|
||||||
|
PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host
|
||||||
|
RestartPolicy RestartPolicy // Restart policy to be used for the container
|
||||||
|
AutoRemove bool // Automatically remove container when it exits
|
||||||
|
VolumeDriver string // Name of the volume driver used to mount volumes
|
||||||
|
VolumesFrom []string // List of volumes to take from other container
|
||||||
|
|
||||||
|
// Applicable to UNIX platforms
|
||||||
|
CapAdd strslice.StrSlice // List of kernel capabilities to add to the container
|
||||||
|
CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container
|
||||||
|
DNS []string `json:"Dns"` // List of DNS server to lookup
|
||||||
|
DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for
|
||||||
|
DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for
|
||||||
|
ExtraHosts []string // List of extra hosts
|
||||||
|
GroupAdd []string // List of additional groups that the container process will run as
|
||||||
|
IpcMode IpcMode // IPC namespace to use for the container
|
||||||
|
Cgroup CgroupSpec // Cgroup to use for the container
|
||||||
|
Links []string // List of links (in the name:alias form)
|
||||||
|
OomScoreAdj int // Container preference for OOM-killing
|
||||||
|
PidMode PidMode // PID namespace to use for the container
|
||||||
|
Privileged bool // Is the container in privileged mode
|
||||||
|
PublishAllPorts bool // Should docker publish all exposed port for the container
|
||||||
|
ReadonlyRootfs bool // Is the container root filesystem in read-only
|
||||||
|
SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux.
|
||||||
|
StorageOpt map[string]string `json:",omitempty"` // Storage driver options per container.
|
||||||
|
Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container
|
||||||
|
UTSMode UTSMode // UTS namespace to use for the container
|
||||||
|
UsernsMode UsernsMode // The user namespace to use for the container
|
||||||
|
ShmSize int64 // Total shm memory usage
|
||||||
|
Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container
|
||||||
|
Runtime string `json:",omitempty"` // Runtime to use with this container
|
||||||
|
|
||||||
|
// Applicable to Windows
|
||||||
|
ConsoleSize [2]uint // Initial console size (height,width)
|
||||||
|
Isolation Isolation // Isolation technology of the container (eg default, hyperv)
|
||||||
|
|
||||||
|
// Contains container's resources (cgroups, ulimits)
|
||||||
|
Resources |
||||||
|
|
||||||
|
// Mounts specs used by the container
|
||||||
|
Mounts []mount.Mount `json:",omitempty"` |
||||||
|
|
||||||
|
// Run a custom init inside the container, if null, use the daemon's configured settings
|
||||||
|
Init *bool `json:",omitempty"` |
||||||
|
|
||||||
|
// Custom init path
|
||||||
|
InitPath string `json:",omitempty"` |
||||||
|
} |
@ -0,0 +1,81 @@ |
|||||||
|
// +build !windows
|
||||||
|
|
||||||
|
package container |
||||||
|
|
||||||
|
import "strings" |
||||||
|
|
||||||
|
// IsValid indicates if an isolation technology is valid
|
||||||
|
func (i Isolation) IsValid() bool { |
||||||
|
return i.IsDefault() |
||||||
|
} |
||||||
|
|
||||||
|
// IsPrivate indicates whether container uses its private network stack.
|
||||||
|
func (n NetworkMode) IsPrivate() bool { |
||||||
|
return !(n.IsHost() || n.IsContainer()) |
||||||
|
} |
||||||
|
|
||||||
|
// IsDefault indicates whether container uses the default network stack.
|
||||||
|
func (n NetworkMode) IsDefault() bool { |
||||||
|
return n == "default" |
||||||
|
} |
||||||
|
|
||||||
|
// NetworkName returns the name of the network stack.
|
||||||
|
func (n NetworkMode) NetworkName() string { |
||||||
|
if n.IsBridge() { |
||||||
|
return "bridge" |
||||||
|
} else if n.IsHost() { |
||||||
|
return "host" |
||||||
|
} else if n.IsContainer() { |
||||||
|
return "container" |
||||||
|
} else if n.IsNone() { |
||||||
|
return "none" |
||||||
|
} else if n.IsDefault() { |
||||||
|
return "default" |
||||||
|
} else if n.IsUserDefined() { |
||||||
|
return n.UserDefined() |
||||||
|
} |
||||||
|
return "" |
||||||
|
} |
||||||
|
|
||||||
|
// IsBridge indicates whether container uses the bridge network stack
|
||||||
|
func (n NetworkMode) IsBridge() bool { |
||||||
|
return n == "bridge" |
||||||
|
} |
||||||
|
|
||||||
|
// IsHost indicates whether container uses the host network stack.
|
||||||
|
func (n NetworkMode) IsHost() bool { |
||||||
|
return n == "host" |
||||||
|
} |
||||||
|
|
||||||
|
// IsContainer indicates whether container uses a container network stack.
|
||||||
|
func (n NetworkMode) IsContainer() bool { |
||||||
|
parts := strings.SplitN(string(n), ":", 2) |
||||||
|
return len(parts) > 1 && parts[0] == "container" |
||||||
|
} |
||||||
|
|
||||||
|
// IsNone indicates whether container isn't using a network stack.
|
||||||
|
func (n NetworkMode) IsNone() bool { |
||||||
|
return n == "none" |
||||||
|
} |
||||||
|
|
||||||
|
// ConnectedContainer is the id of the container which network this container is connected to.
|
||||||
|
func (n NetworkMode) ConnectedContainer() string { |
||||||
|
parts := strings.SplitN(string(n), ":", 2) |
||||||
|
if len(parts) > 1 { |
||||||
|
return parts[1] |
||||||
|
} |
||||||
|
return "" |
||||||
|
} |
||||||
|
|
||||||
|
// IsUserDefined indicates user-created network
|
||||||
|
func (n NetworkMode) IsUserDefined() bool { |
||||||
|
return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer() |
||||||
|
} |
||||||
|
|
||||||
|
//UserDefined indicates user-created network
|
||||||
|
func (n NetworkMode) UserDefined() string { |
||||||
|
if n.IsUserDefined() { |
||||||
|
return string(n) |
||||||
|
} |
||||||
|
return "" |
||||||
|
} |
87
tools/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go
generated
vendored
87
tools/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go
generated
vendored
@ -0,0 +1,87 @@ |
|||||||
|
package container |
||||||
|
|
||||||
|
import ( |
||||||
|
"strings" |
||||||
|
) |
||||||
|
|
||||||
|
// IsDefault indicates whether container uses the default network stack.
|
||||||
|
func (n NetworkMode) IsDefault() bool { |
||||||
|
return n == "default" |
||||||
|
} |
||||||
|
|
||||||
|
// IsNone indicates whether container isn't using a network stack.
|
||||||
|
func (n NetworkMode) IsNone() bool { |
||||||
|
return n == "none" |
||||||
|
} |
||||||
|
|
||||||
|
// IsContainer indicates whether container uses a container network stack.
|
||||||
|
// Returns false as windows doesn't support this mode
|
||||||
|
func (n NetworkMode) IsContainer() bool { |
||||||
|
return false |
||||||
|
} |
||||||
|
|
||||||
|
// IsBridge indicates whether container uses the bridge network stack
|
||||||
|
// in windows it is given the name NAT
|
||||||
|
func (n NetworkMode) IsBridge() bool { |
||||||
|
return n == "nat" |
||||||
|
} |
||||||
|
|
||||||
|
// IsHost indicates whether container uses the host network stack.
|
||||||
|
// returns false as this is not supported by windows
|
||||||
|
func (n NetworkMode) IsHost() bool { |
||||||
|
return false |
||||||
|
} |
||||||
|
|
||||||
|
// IsPrivate indicates whether container uses its private network stack.
|
||||||
|
func (n NetworkMode) IsPrivate() bool { |
||||||
|
return !(n.IsHost() || n.IsContainer()) |
||||||
|
} |
||||||
|
|
||||||
|
// ConnectedContainer is the id of the container which network this container is connected to.
|
||||||
|
// Returns blank string on windows
|
||||||
|
func (n NetworkMode) ConnectedContainer() string { |
||||||
|
return "" |
||||||
|
} |
||||||
|
|
||||||
|
// IsUserDefined indicates user-created network
|
||||||
|
func (n NetworkMode) IsUserDefined() bool { |
||||||
|
return !n.IsDefault() && !n.IsNone() && !n.IsBridge() |
||||||
|
} |
||||||
|
|
||||||
|
// IsHyperV indicates the use of a Hyper-V partition for isolation
|
||||||
|
func (i Isolation) IsHyperV() bool { |
||||||
|
return strings.ToLower(string(i)) == "hyperv" |
||||||
|
} |
||||||
|
|
||||||
|
// IsProcess indicates the use of process isolation
|
||||||
|
func (i Isolation) IsProcess() bool { |
||||||
|
return strings.ToLower(string(i)) == "process" |
||||||
|
} |
||||||
|
|
||||||
|
// IsValid indicates if an isolation technology is valid
|
||||||
|
func (i Isolation) IsValid() bool { |
||||||
|
return i.IsDefault() || i.IsHyperV() || i.IsProcess() |
||||||
|
} |
||||||
|
|
||||||
|
// NetworkName returns the name of the network stack.
|
||||||
|
func (n NetworkMode) NetworkName() string { |
||||||
|
if n.IsDefault() { |
||||||
|
return "default" |
||||||
|
} else if n.IsBridge() { |
||||||
|
return "nat" |
||||||
|
} else if n.IsNone() { |
||||||
|
return "none" |
||||||
|
} else if n.IsUserDefined() { |
||||||
|
return n.UserDefined() |
||||||
|
} |
||||||
|
|
||||||
|
return "" |
||||||
|
} |
||||||
|
|
||||||
|
//UserDefined indicates user-created network
|
||||||
|
func (n NetworkMode) UserDefined() string { |
||||||
|
if n.IsUserDefined() { |
||||||
|
return string(n) |
||||||
|
} |
||||||
|
return "" |
||||||
|
} |
@ -0,0 +1,13 @@ |
|||||||
|
package types |
||||||
|
|
||||||
|
// This file was generated by the swagger tool.
|
||||||
|
// Editing this file might prove futile when you re-run the swagger generate command
|
||||||
|
|
||||||
|
// ErrorResponse Represents an error.
|
||||||
|
// swagger:model ErrorResponse
|
||||||
|
type ErrorResponse struct { |
||||||
|
|
||||||
|
// The error message.
|
||||||
|
// Required: true
|
||||||
|
Message string `json:"message"` |
||||||
|
} |
@ -0,0 +1,42 @@ |
|||||||
|
package events |
||||||
|
|
||||||
|
const ( |
||||||
|
// ContainerEventType is the event type that containers generate
|
||||||
|
ContainerEventType = "container" |
||||||
|
// DaemonEventType is the event type that daemon generate
|
||||||
|
DaemonEventType = "daemon" |
||||||
|
// ImageEventType is the event type that images generate
|
||||||
|
ImageEventType = "image" |
||||||
|
// NetworkEventType is the event type that networks generate
|
||||||
|
NetworkEventType = "network" |
||||||
|
// PluginEventType is the event type that plugins generate
|
||||||
|
PluginEventType = "plugin" |
||||||
|
// VolumeEventType is the event type that volumes generate
|
||||||
|
VolumeEventType = "volume" |
||||||
|
) |
||||||
|
|
||||||
|
// Actor describes something that generates events,
|
||||||
|
// like a container, or a network, or a volume.
|
||||||
|
// It has a defined name and a set or attributes.
|
||||||
|
// The container attributes are its labels, other actors
|
||||||
|
// can generate these attributes from other properties.
|
||||||
|
type Actor struct { |
||||||
|
ID string |
||||||
|
Attributes map[string]string |
||||||
|
} |
||||||
|
|
||||||
|
// Message represents the information an event contains
|
||||||
|
type Message struct { |
||||||
|
// Deprecated information from JSONMessage.
|
||||||
|
// With data only in container events.
|
||||||
|
Status string `json:"status,omitempty"` |
||||||
|
ID string `json:"id,omitempty"` |
||||||
|
From string `json:"from,omitempty"` |
||||||
|
|
||||||
|
Type string |
||||||
|
Action string |
||||||
|
Actor Actor |
||||||
|
|
||||||
|
Time int64 `json:"time,omitempty"` |
||||||
|
TimeNano int64 `json:"timeNano,omitempty"` |
||||||
|
} |
@ -0,0 +1,310 @@ |
|||||||
|
// Package filters provides helper function to parse and handle command line
|
||||||
|
// filter, used for example in docker ps or docker images commands.
|
||||||
|
package filters |
||||||
|
|
||||||
|
import ( |
||||||
|
"encoding/json" |
||||||
|
"errors" |
||||||
|
"fmt" |
||||||
|
"regexp" |
||||||
|
"strings" |
||||||
|
|
||||||
|
"github.com/docker/docker/api/types/versions" |
||||||
|
) |
||||||
|
|
||||||
|
// Args stores filter arguments as map key:{map key: bool}.
|
||||||
|
// It contains an aggregation of the map of arguments (which are in the form
|
||||||
|
// of -f 'key=value') based on the key, and stores values for the same key
|
||||||
|
// in a map with string keys and boolean values.
|
||||||
|
// e.g given -f 'label=label1=1' -f 'label=label2=2' -f 'image.name=ubuntu'
|
||||||
|
// the args will be {"image.name":{"ubuntu":true},"label":{"label1=1":true,"label2=2":true}}
|
||||||
|
type Args struct { |
||||||
|
fields map[string]map[string]bool |
||||||
|
} |
||||||
|
|
||||||
|
// NewArgs initializes a new Args struct.
|
||||||
|
func NewArgs() Args { |
||||||
|
return Args{fields: map[string]map[string]bool{}} |
||||||
|
} |
||||||
|
|
||||||
|
// ParseFlag parses the argument to the filter flag. Like
|
||||||
|
//
|
||||||
|
// `docker ps -f 'created=today' -f 'image.name=ubuntu*'`
|
||||||
|
//
|
||||||
|
// If prev map is provided, then it is appended to, and returned. By default a new
|
||||||
|
// map is created.
|
||||||
|
func ParseFlag(arg string, prev Args) (Args, error) { |
||||||
|
filters := prev |
||||||
|
if len(arg) == 0 { |
||||||
|
return filters, nil |
||||||
|
} |
||||||
|
|
||||||
|
if !strings.Contains(arg, "=") { |
||||||
|
return filters, ErrBadFormat |
||||||
|
} |
||||||
|
|
||||||
|
f := strings.SplitN(arg, "=", 2) |
||||||
|
|
||||||
|
name := strings.ToLower(strings.TrimSpace(f[0])) |
||||||
|
value := strings.TrimSpace(f[1]) |
||||||
|
|
||||||
|
filters.Add(name, value) |
||||||
|
|
||||||
|
return filters, nil |
||||||
|
} |
||||||
|
|
||||||
|
// ErrBadFormat is an error returned in case of bad format for a filter.
|
||||||
|
var ErrBadFormat = errors.New("bad format of filter (expected name=value)") |
||||||
|
|
||||||
|
// ToParam packs the Args into a string for easy transport from client to server.
|
||||||
|
func ToParam(a Args) (string, error) { |
||||||
|
// this way we don't URL encode {}, just empty space
|
||||||
|
if a.Len() == 0 { |
||||||
|
return "", nil |
||||||
|
} |
||||||
|
|
||||||
|
buf, err := json.Marshal(a.fields) |
||||||
|
if err != nil { |
||||||
|
return "", err |
||||||
|
} |
||||||
|
return string(buf), nil |
||||||
|
} |
||||||
|
|
||||||
|
// ToParamWithVersion packs the Args into a string for easy transport from client to server.
|
||||||
|
// The generated string will depend on the specified version (corresponding to the API version).
|
||||||
|
func ToParamWithVersion(version string, a Args) (string, error) { |
||||||
|
// this way we don't URL encode {}, just empty space
|
||||||
|
if a.Len() == 0 { |
||||||
|
return "", nil |
||||||
|
} |
||||||
|
|
||||||
|
// for daemons older than v1.10, filter must be of the form map[string][]string
|
||||||
|
buf := []byte{} |
||||||
|
err := errors.New("") |
||||||
|
if version != "" && versions.LessThan(version, "1.22") { |
||||||
|
buf, err = json.Marshal(convertArgsToSlice(a.fields)) |
||||||
|
} else { |
||||||
|
buf, err = json.Marshal(a.fields) |
||||||
|
} |
||||||
|
if err != nil { |
||||||
|
return "", err |
||||||
|
} |
||||||
|
return string(buf), nil |
||||||
|
} |
||||||
|
|
||||||
|
// FromParam unpacks the filter Args.
|
||||||
|
func FromParam(p string) (Args, error) { |
||||||
|
if len(p) == 0 { |
||||||
|
return NewArgs(), nil |
||||||
|
} |
||||||
|
|
||||||
|
r := strings.NewReader(p) |
||||||
|
d := json.NewDecoder(r) |
||||||
|
|
||||||
|
m := map[string]map[string]bool{} |
||||||
|
if err := d.Decode(&m); err != nil { |
||||||
|
r.Seek(0, 0) |
||||||
|
|
||||||
|
// Allow parsing old arguments in slice format.
|
||||||
|
// Because other libraries might be sending them in this format.
|
||||||
|
deprecated := map[string][]string{} |
||||||
|
if deprecatedErr := d.Decode(&deprecated); deprecatedErr == nil { |
||||||
|
m = deprecatedArgs(deprecated) |
||||||
|
} else { |
||||||
|
return NewArgs(), err |
||||||
|
} |
||||||
|
} |
||||||
|
return Args{m}, nil |
||||||
|
} |
||||||
|
|
||||||
|
// Get returns the list of values associates with a field.
|
||||||
|
// It returns a slice of strings to keep backwards compatibility with old code.
|
||||||
|
func (filters Args) Get(field string) []string { |
||||||
|
values := filters.fields[field] |
||||||
|
if values == nil { |
||||||
|
return make([]string, 0) |
||||||
|
} |
||||||
|
slice := make([]string, 0, len(values)) |
||||||
|
for key := range values { |
||||||
|
slice = append(slice, key) |
||||||
|
} |
||||||
|
return slice |
||||||
|
} |
||||||
|
|
||||||
|
// Add adds a new value to a filter field.
|
||||||
|
func (filters Args) Add(name, value string) { |
||||||
|
if _, ok := filters.fields[name]; ok { |
||||||
|
filters.fields[name][value] = true |
||||||
|
} else { |
||||||
|
filters.fields[name] = map[string]bool{value: true} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// Del removes a value from a filter field.
|
||||||
|
func (filters Args) Del(name, value string) { |
||||||
|
if _, ok := filters.fields[name]; ok { |
||||||
|
delete(filters.fields[name], value) |
||||||
|
if len(filters.fields[name]) == 0 { |
||||||
|
delete(filters.fields, name) |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
// Len returns the number of fields in the arguments.
|
||||||
|
func (filters Args) Len() int { |
||||||
|
return len(filters.fields) |
||||||
|
} |
||||||
|
|
||||||
|
// MatchKVList returns true if the values for the specified field matches the ones
|
||||||
|
// from the sources.
|
||||||
|
// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}},
|
||||||
|
// field is 'label' and sources are {'label1': '1', 'label2': '2'}
|
||||||
|
// it returns true.
|
||||||
|
func (filters Args) MatchKVList(field string, sources map[string]string) bool { |
||||||
|
fieldValues := filters.fields[field] |
||||||
|
|
||||||
|
//do not filter if there is no filter set or cannot determine filter
|
||||||
|
if len(fieldValues) == 0 { |
||||||
|
return true |
||||||
|
} |
||||||
|
|
||||||
|
if len(sources) == 0 { |
||||||
|
return false |
||||||
|
} |
||||||
|
|
||||||
|
for name2match := range fieldValues { |
||||||
|
testKV := strings.SplitN(name2match, "=", 2) |
||||||
|
|
||||||
|
v, ok := sources[testKV[0]] |
||||||
|
if !ok { |
||||||
|
return false |
||||||
|
} |
||||||
|
if len(testKV) == 2 && testKV[1] != v { |
||||||
|
return false |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
return true |
||||||
|
} |
||||||
|
|
||||||
|
// Match returns true if the values for the specified field matches the source string
|
||||||
|
// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}},
|
||||||
|
// field is 'image.name' and source is 'ubuntu'
|
||||||
|
// it returns true.
|
||||||
|
func (filters Args) Match(field, source string) bool { |
||||||
|
if filters.ExactMatch(field, source) { |
||||||
|
return true |
||||||
|
} |
||||||
|
|
||||||
|
fieldValues := filters.fields[field] |
||||||
|
for name2match := range fieldValues { |
||||||
|
match, err := regexp.MatchString(name2match, source) |
||||||
|
if err != nil { |
||||||
|
continue |
||||||
|
} |
||||||
|
if match { |
||||||
|
return true |
||||||
|
} |
||||||
|
} |
||||||
|
return false |
||||||
|
} |
||||||
|
|
||||||
|
// ExactMatch returns true if the source matches exactly one of the filters.
|
||||||
|
func (filters Args) ExactMatch(field, source string) bool { |
||||||
|
fieldValues, ok := filters.fields[field] |
||||||
|
//do not filter if there is no filter set or cannot determine filter
|
||||||
|
if !ok || len(fieldValues) == 0 { |
||||||
|
return true |
||||||
|
} |
||||||
|
|
||||||
|
// try to match full name value to avoid O(N) regular expression matching
|
||||||
|
return fieldValues[source] |
||||||
|
} |
||||||
|
|
||||||
|
// UniqueExactMatch returns true if there is only one filter and the source matches exactly this one.
|
||||||
|
func (filters Args) UniqueExactMatch(field, source string) bool { |
||||||
|
fieldValues := filters.fields[field] |
||||||
|
//do not filter if there is no filter set or cannot determine filter
|
||||||
|
if len(fieldValues) == 0 { |
||||||
|
return true |
||||||
|
} |
||||||
|
if len(filters.fields[field]) != 1 { |
||||||
|
return false |
||||||
|
} |
||||||
|
|
||||||
|
// try to match full name value to avoid O(N) regular expression matching
|
||||||
|
return fieldValues[source] |
||||||
|
} |
||||||
|
|
||||||
|
// FuzzyMatch returns true if the source matches exactly one of the filters,
|
||||||
|
// or the source has one of the filters as a prefix.
|
||||||
|
func (filters Args) FuzzyMatch(field, source string) bool { |
||||||
|
if filters.ExactMatch(field, source) { |
||||||
|
return true |
||||||
|
} |
||||||
|
|
||||||
|
fieldValues := filters.fields[field] |
||||||
|
for prefix := range fieldValues { |
||||||
|
if strings.HasPrefix(source, prefix) { |
||||||
|
return true |
||||||
|
} |
||||||
|
} |
||||||
|
return false |
||||||
|
} |
||||||
|
|
||||||
|
// Include returns true if the name of the field to filter is in the filters.
|
||||||
|
func (filters Args) Include(field string) bool { |
||||||
|
_, ok := filters.fields[field] |
||||||
|
return ok |
||||||
|
} |
||||||
|
|
||||||
|
// Validate ensures that all the fields in the filter are valid.
|
||||||
|
// It returns an error as soon as it finds an invalid field.
|
||||||
|
func (filters Args) Validate(accepted map[string]bool) error { |
||||||
|
for name := range filters.fields { |
||||||
|
if !accepted[name] { |
||||||
|
return fmt.Errorf("Invalid filter '%s'", name) |
||||||
|
} |
||||||
|
} |
||||||
|
return nil |
||||||
|
} |
||||||
|
|
||||||
|
// WalkValues iterates over the list of filtered values for a field.
|
||||||
|
// It stops the iteration if it finds an error and it returns that error.
|
||||||
|
func (filters Args) WalkValues(field string, op func(value string) error) error { |
||||||
|
if _, ok := filters.fields[field]; !ok { |
||||||
|
return nil |
||||||
|
} |
||||||
|
for v := range filters.fields[field] { |
||||||
|
if err := op(v); err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
} |
||||||
|
return nil |
||||||
|
} |
||||||
|
|
||||||
|
func deprecatedArgs(d map[string][]string) map[string]map[string]bool { |
||||||
|
m := map[string]map[string]bool{} |
||||||
|
for k, v := range d { |
||||||
|
values := map[string]bool{} |
||||||
|
for _, vv := range v { |
||||||
|
values[vv] = true |
||||||
|
} |
||||||
|
m[k] = values |
||||||
|
} |
||||||
|
return m |
||||||
|
} |
||||||
|
|
||||||
|
func convertArgsToSlice(f map[string]map[string]bool) map[string][]string { |
||||||
|
m := map[string][]string{} |
||||||
|
for k, v := range f { |
||||||
|
values := []string{} |
||||||
|
for kk := range v { |
||||||
|
if v[kk] { |
||||||
|
values = append(values, kk) |
||||||
|
} |
||||||
|
} |
||||||
|
m[k] = values |
||||||
|
} |
||||||
|
return m |
||||||
|
} |
@ -0,0 +1,13 @@ |
|||||||
|
package types |
||||||
|
|
||||||
|
// This file was generated by the swagger tool.
|
||||||
|
// Editing this file might prove futile when you re-run the swagger generate command
|
||||||
|
|
||||||
|
// IDResponse Response to an API call that returns just an Id
|
||||||
|
// swagger:model IdResponse
|
||||||
|
type IDResponse struct { |
||||||
|
|
||||||
|
// The id of the newly created object.
|
||||||
|
// Required: true
|
||||||
|
ID string `json:"Id"` |
||||||
|
} |
@ -0,0 +1,49 @@ |
|||||||
|
package types |
||||||
|
|
||||||
|
// This file was generated by the swagger tool.
|
||||||
|
// Editing this file might prove futile when you re-run the swagger generate command
|
||||||
|
|
||||||
|
// ImageSummary image summary
|
||||||
|
// swagger:model ImageSummary
|
||||||
|
type ImageSummary struct { |
||||||
|
|
||||||
|
// containers
|
||||||
|
// Required: true
|
||||||
|
Containers int64 `json:"Containers"` |
||||||
|
|
||||||
|
// created
|
||||||
|
// Required: true
|
||||||
|
Created int64 `json:"Created"` |
||||||
|
|
||||||
|
// Id
|
||||||
|
// Required: true
|
||||||
|
ID string `json:"Id"` |
||||||
|
|
||||||
|
// labels
|
||||||
|
// Required: true
|
||||||
|
Labels map[string]string `json:"Labels"` |
||||||
|
|
||||||
|
// parent Id
|
||||||
|
// Required: true
|
||||||
|
ParentID string `json:"ParentId"` |
||||||
|
|
||||||
|
// repo digests
|
||||||
|
// Required: true
|
||||||
|
RepoDigests []string `json:"RepoDigests"` |
||||||
|
|
||||||
|
// repo tags
|
||||||
|
// Required: true
|
||||||
|
RepoTags []string `json:"RepoTags"` |
||||||
|
|
||||||
|
// shared size
|
||||||
|
// Required: true
|
||||||
|
SharedSize int64 `json:"SharedSize"` |
||||||
|
|
||||||
|
// size
|
||||||
|
// Required: true
|
||||||
|
Size int64 `json:"Size"` |
||||||
|
|
||||||
|
// virtual size
|
||||||
|
// Required: true
|
||||||
|
VirtualSize int64 `json:"VirtualSize"` |
||||||
|
} |
@ -0,0 +1,113 @@ |
|||||||
|
package mount

import (
	"os"
)

// NOTE(review): vendored Docker API types — keep in sync with upstream
// rather than hand-editing behavior.

// Type represents the type of a mount.
type Type string

// Type constants
const (
	// TypeBind is the type for mounting host dir
	TypeBind Type = "bind"
	// TypeVolume is the type for remote storage volumes
	TypeVolume Type = "volume"
	// TypeTmpfs is the type for mounting tmpfs
	TypeTmpfs Type = "tmpfs"
)

// Mount represents a mount (volume).
type Mount struct {
	Type Type `json:",omitempty"`
	// Source specifies the name of the mount. Depending on mount type, this
	// may be a volume name or a host path, or even ignored.
	// Source is not supported for tmpfs (must be an empty value)
	Source   string `json:",omitempty"`
	Target   string `json:",omitempty"`
	ReadOnly bool   `json:",omitempty"`

	// Per-type option sets; presumably only the one matching Type is
	// populated — confirm against callers.
	BindOptions   *BindOptions   `json:",omitempty"`
	VolumeOptions *VolumeOptions `json:",omitempty"`
	TmpfsOptions  *TmpfsOptions  `json:",omitempty"`
}

// Propagation represents the propagation of a mount.
type Propagation string

// Propagation modes understood by the engine (bind mounts).
const (
	// PropagationRPrivate RPRIVATE
	PropagationRPrivate Propagation = "rprivate"
	// PropagationPrivate PRIVATE
	PropagationPrivate Propagation = "private"
	// PropagationRShared RSHARED
	PropagationRShared Propagation = "rshared"
	// PropagationShared SHARED
	PropagationShared Propagation = "shared"
	// PropagationRSlave RSLAVE
	PropagationRSlave Propagation = "rslave"
	// PropagationSlave SLAVE
	PropagationSlave Propagation = "slave"
)

// Propagations is the list of all valid mount propagations
var Propagations = []Propagation{
	PropagationRPrivate,
	PropagationPrivate,
	PropagationRShared,
	PropagationShared,
	PropagationRSlave,
	PropagationSlave,
}

// BindOptions defines options specific to mounts of type "bind".
type BindOptions struct {
	Propagation Propagation `json:",omitempty"`
}

// VolumeOptions represents the options for a mount of type volume.
type VolumeOptions struct {
	NoCopy       bool              `json:",omitempty"`
	Labels       map[string]string `json:",omitempty"`
	DriverConfig *Driver           `json:",omitempty"`
}

// Driver represents a volume driver.
type Driver struct {
	Name    string            `json:",omitempty"`
	Options map[string]string `json:",omitempty"`
}

// TmpfsOptions defines options specific to mounts of type "tmpfs".
type TmpfsOptions struct {
	// Size sets the size of the tmpfs, in bytes.
	//
	// This will be converted to an operating system specific value
	// depending on the host. For example, on linux, it will be converted to
	// use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with
	// docker, uses a straight byte value.
	//
	// Percentages are not supported.
	SizeBytes int64 `json:",omitempty"`
	// Mode of the tmpfs upon creation
	Mode os.FileMode `json:",omitempty"`

	// TODO(stevvooe): There are several more tmpfs flags, specified in the
	// daemon, that are accepted. Only the most basic are added for now.
	//
	// From docker/docker/pkg/mount/flags.go:
	//
	// var validFlags = map[string]bool{
	//	"":          true,
	//	"size":      true, X
	//	"mode":      true, X
	//	"uid":       true,
	//	"gid":       true,
	//	"nr_inodes": true,
	//	"nr_blocks": true,
	//	"mpol":      true,
	// }
	//
	// Some of these may be straightforward to add, but others, such as
	// uid/gid have implications in a clustered system.
}
@ -0,0 +1,59 @@ |
|||||||
|
package network

// NOTE(review): vendored Docker API types — keep in sync with upstream
// rather than hand-editing behavior.

// Address represents an IP address
type Address struct {
	Addr      string // IP address in string form
	PrefixLen int    // network prefix length, in bits
}

// IPAM represents IP Address Management
type IPAM struct {
	Driver  string
	Options map[string]string // Per-network IPAM driver options
	Config  []IPAMConfig
}

// IPAMConfig represents IPAM configurations
type IPAMConfig struct {
	Subnet     string            `json:",omitempty"`
	IPRange    string            `json:",omitempty"`
	Gateway    string            `json:",omitempty"`
	AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"`
}

// EndpointIPAMConfig represents IPAM configurations for the endpoint
type EndpointIPAMConfig struct {
	IPv4Address  string   `json:",omitempty"`
	IPv6Address  string   `json:",omitempty"`
	LinkLocalIPs []string `json:",omitempty"`
}

// PeerInfo represents one peer of an overlay network
type PeerInfo struct {
	Name string
	IP   string
}

// EndpointSettings stores the network endpoint details
type EndpointSettings struct {
	// Configurations
	IPAMConfig *EndpointIPAMConfig
	Links      []string
	Aliases    []string
	// Operational data
	NetworkID           string
	EndpointID          string
	Gateway             string
	IPAddress           string
	IPPrefixLen         int
	IPv6Gateway         string
	GlobalIPv6Address   string
	GlobalIPv6PrefixLen int
	MacAddress          string
}

// NetworkingConfig represents the container's networking configuration for each of its interfaces
// Carries the networking configs specified in the `docker run` and `docker network connect` commands
type NetworkingConfig struct {
	EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network
}
@ -0,0 +1,189 @@ |
|||||||
|
package types

// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// NOTE(review): vendored, generated code — regenerate rather than hand-edit.

// Plugin A plugin for the Engine API
// swagger:model Plugin
type Plugin struct {

	// config
	// Required: true
	Config PluginConfig `json:"Config"`

	// True when the plugin is running. False when the plugin is not running, only installed.
	// Required: true
	Enabled bool `json:"Enabled"`

	// Id
	ID string `json:"Id,omitempty"`

	// name
	// Required: true
	Name string `json:"Name"`

	// plugin remote reference used to push/pull the plugin
	PluginReference string `json:"PluginReference,omitempty"`

	// settings
	// Required: true
	Settings PluginSettings `json:"Settings"`
}

// PluginConfig The config of a plugin.
// swagger:model PluginConfig
type PluginConfig struct {

	// args
	// Required: true
	Args PluginConfigArgs `json:"Args"`

	// description
	// Required: true
	Description string `json:"Description"`

	// documentation
	// Required: true
	Documentation string `json:"Documentation"`

	// entrypoint
	// Required: true
	Entrypoint []string `json:"Entrypoint"`

	// env
	// Required: true
	Env []PluginEnv `json:"Env"`

	// interface
	// Required: true
	Interface PluginConfigInterface `json:"Interface"`

	// linux
	// Required: true
	Linux PluginConfigLinux `json:"Linux"`

	// mounts
	// Required: true
	Mounts []PluginMount `json:"Mounts"`

	// network
	// Required: true
	Network PluginConfigNetwork `json:"Network"`

	// propagated mount
	// Required: true
	PropagatedMount string `json:"PropagatedMount"`

	// user
	User PluginConfigUser `json:"User,omitempty"`

	// work dir
	// Required: true
	WorkDir string `json:"WorkDir"`

	// rootfs
	Rootfs *PluginConfigRootfs `json:"rootfs,omitempty"`
}

// PluginConfigArgs plugin config args
// swagger:model PluginConfigArgs
type PluginConfigArgs struct {

	// description
	// Required: true
	Description string `json:"Description"`

	// name
	// Required: true
	Name string `json:"Name"`

	// settable
	// Required: true
	Settable []string `json:"Settable"`

	// value
	// Required: true
	Value []string `json:"Value"`
}

// PluginConfigInterface The interface between Docker and the plugin
// swagger:model PluginConfigInterface
type PluginConfigInterface struct {

	// socket
	// Required: true
	Socket string `json:"Socket"`

	// types
	// Required: true
	Types []PluginInterfaceType `json:"Types"`
}

// PluginConfigLinux plugin config linux
// swagger:model PluginConfigLinux
type PluginConfigLinux struct {

	// allow all devices
	// Required: true
	AllowAllDevices bool `json:"AllowAllDevices"`

	// capabilities
	// Required: true
	Capabilities []string `json:"Capabilities"`

	// devices
	// Required: true
	Devices []PluginDevice `json:"Devices"`
}

// PluginConfigNetwork plugin config network
// swagger:model PluginConfigNetwork
type PluginConfigNetwork struct {

	// type
	// Required: true
	Type string `json:"Type"`
}

// PluginConfigRootfs plugin config rootfs
// swagger:model PluginConfigRootfs
type PluginConfigRootfs struct {

	// diff ids
	DiffIds []string `json:"diff_ids"`

	// type
	Type string `json:"type,omitempty"`
}

// PluginConfigUser plugin config user
// swagger:model PluginConfigUser
type PluginConfigUser struct {

	// g ID
	GID uint32 `json:"GID,omitempty"`

	// UID
	UID uint32 `json:"UID,omitempty"`
}

// PluginSettings Settings that can be modified by users.
// swagger:model PluginSettings
type PluginSettings struct {

	// args
	// Required: true
	Args []string `json:"Args"`

	// devices
	// Required: true
	Devices []PluginDevice `json:"Devices"`

	// env
	// Required: true
	Env []string `json:"Env"`

	// mounts
	// Required: true
	Mounts []PluginMount `json:"Mounts"`
}
@ -0,0 +1,25 @@ |
|||||||
|
package types

// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// NOTE(review): vendored, generated code — regenerate rather than hand-edit.

// PluginDevice plugin device
// swagger:model PluginDevice
type PluginDevice struct {

	// description
	// Required: true
	Description string `json:"Description"`

	// name
	// Required: true
	Name string `json:"Name"`

	// path
	// Required: true
	Path *string `json:"Path"`

	// settable
	// Required: true
	Settable []string `json:"Settable"`
}
@ -0,0 +1,25 @@ |
|||||||
|
package types

// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// NOTE(review): vendored, generated code — regenerate rather than hand-edit.

// PluginEnv plugin env
// swagger:model PluginEnv
type PluginEnv struct {

	// description
	// Required: true
	Description string `json:"Description"`

	// name
	// Required: true
	Name string `json:"Name"`

	// settable
	// Required: true
	Settable []string `json:"Settable"`

	// value
	// Required: true
	Value *string `json:"Value"`
}
@ -0,0 +1,21 @@ |
|||||||
|
package types

// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// NOTE(review): vendored, generated code — regenerate rather than hand-edit.

// PluginInterfaceType plugin interface type
// swagger:model PluginInterfaceType
type PluginInterfaceType struct {

	// capability
	// Required: true
	Capability string `json:"Capability"`

	// prefix
	// Required: true
	Prefix string `json:"Prefix"`

	// version
	// Required: true
	Version string `json:"Version"`
}
@ -0,0 +1,37 @@ |
|||||||
|
package types

// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// NOTE(review): vendored, generated code — regenerate rather than hand-edit.

// PluginMount plugin mount
// swagger:model PluginMount
type PluginMount struct {

	// description
	// Required: true
	Description string `json:"Description"`

	// destination
	// Required: true
	Destination string `json:"Destination"`

	// name
	// Required: true
	Name string `json:"Name"`

	// options
	// Required: true
	Options []string `json:"Options"`

	// settable
	// Required: true
	Settable []string `json:"Settable"`

	// source
	// Required: true
	Source *string `json:"Source"`

	// type
	// Required: true
	Type string `json:"Type"`
}
@ -0,0 +1,64 @@ |
|||||||
|
package types

import (
	"encoding/json"
	"fmt"
)

// PluginsListResponse contains the response for the Engine API
type PluginsListResponse []*Plugin

// Names of known plugin driver kinds.
const (
	authzDriver   = "AuthzDriver"
	graphDriver   = "GraphDriver"
	ipamDriver    = "IpamDriver"
	networkDriver = "NetworkDriver"
	volumeDriver  = "VolumeDriver"
)
||||||
|
|
||||||
|
// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType
|
||||||
|
func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error { |
||||||
|
versionIndex := len(p) |
||||||
|
prefixIndex := 0 |
||||||
|
if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' { |
||||||
|
return fmt.Errorf("%q is not a plugin interface type", p) |
||||||
|
} |
||||||
|
p = p[1 : len(p)-1] |
||||||
|
loop: |
||||||
|
for i, b := range p { |
||||||
|
switch b { |
||||||
|
case '.': |
||||||
|
prefixIndex = i |
||||||
|
case '/': |
||||||
|
versionIndex = i |
||||||
|
break loop |
||||||
|
} |
||||||
|
} |
||||||
|
t.Prefix = string(p[:prefixIndex]) |
||||||
|
t.Capability = string(p[prefixIndex+1 : versionIndex]) |
||||||
|
if versionIndex < len(p) { |
||||||
|
t.Version = string(p[versionIndex+1:]) |
||||||
|
} |
||||||
|
return nil |
||||||
|
} |
||||||
|
|
||||||
|
// MarshalJSON implements json.Marshaler for PluginInterfaceType
|
||||||
|
func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) { |
||||||
|
return json.Marshal(t.String()) |
||||||
|
} |
||||||
|
|
||||||
|
// String implements fmt.Stringer for PluginInterfaceType
|
||||||
|
func (t PluginInterfaceType) String() string { |
||||||
|
return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version) |
||||||
|
} |
||||||
|
|
||||||
|
// PluginPrivilege describes a permission the user has to accept
// upon installing a plugin.
type PluginPrivilege struct {
	Name        string
	Description string
	Value       []string
}

// PluginPrivileges is a list of PluginPrivilege
type PluginPrivileges []PluginPrivilege
@ -0,0 +1,23 @@ |
|||||||
|
package types

// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// NOTE(review): vendored, generated code — regenerate rather than hand-edit.

// Port An open port on a container
// swagger:model Port
type Port struct {

	// IP
	IP string `json:"IP,omitempty"`

	// Port on the container
	// Required: true
	PrivatePort uint16 `json:"PrivatePort"`

	// Port exposed on the host
	PublicPort uint16 `json:"PublicPort,omitempty"`

	// type
	// Required: true
	Type string `json:"Type"`
}
@ -0,0 +1,34 @@ |
|||||||
|
package reference |
||||||
|
|
||||||
|
import ( |
||||||
|
distreference "github.com/docker/distribution/reference" |
||||||
|
) |
||||||
|
|
||||||
|
// Parse parses the given references and returns the repository and
|
||||||
|
// tag (if present) from it. If there is an error during parsing, it will
|
||||||
|
// return an error.
|
||||||
|
func Parse(ref string) (string, string, error) { |
||||||
|
distributionRef, err := distreference.ParseNamed(ref) |
||||||
|
if err != nil { |
||||||
|
return "", "", err |
||||||
|
} |
||||||
|
|
||||||
|
tag := GetTagFromNamedRef(distributionRef) |
||||||
|
return distributionRef.Name(), tag, nil |
||||||
|
} |
||||||
|
|
||||||
|
// GetTagFromNamedRef returns a tag from the specified reference.
|
||||||
|
// This function is necessary as long as the docker "server" api makes the distinction between repository
|
||||||
|
// and tags.
|
||||||
|
func GetTagFromNamedRef(ref distreference.Named) string { |
||||||
|
var tag string |
||||||
|
switch x := ref.(type) { |
||||||
|
case distreference.Digested: |
||||||
|
tag = x.Digest().String() |
||||||
|
case distreference.NamedTagged: |
||||||
|
tag = x.Tag() |
||||||
|
default: |
||||||
|
tag = "latest" |
||||||
|
} |
||||||
|
return tag |
||||||
|
} |
@ -0,0 +1,21 @@ |
|||||||
|
package registry

// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/swagger-gen.sh
// ----------------------------------------------------------------------------

// AuthenticateOKBody authenticate o k body
// swagger:model AuthenticateOKBody
type AuthenticateOKBody struct {

	// An opaque token used to authenticate a user after a successful login
	// Required: true
	IdentityToken string `json:"IdentityToken"`

	// The status of the authentication
	// Required: true
	Status string `json:"Status"`
}
@ -0,0 +1,104 @@ |
|||||||
|
package registry |
||||||
|
|
||||||
|
import ( |
||||||
|
"encoding/json" |
||||||
|
"net" |
||||||
|
) |
||||||
|
|
||||||
|
// ServiceConfig stores daemon registry services configuration.
type ServiceConfig struct {
	// CIDR ranges of registries that may be contacted insecurely.
	InsecureRegistryCIDRs []*NetIPNet           `json:"InsecureRegistryCIDRs"`
	// Per-index registry configuration, keyed by index name.
	IndexConfigs          map[string]*IndexInfo `json:"IndexConfigs"`
	// Registry mirror URIs.
	Mirrors []string
}
||||||
|
|
||||||
|
// NetIPNet is the net.IPNet type, which can be marshalled and
// unmarshalled to JSON as a CIDR string (e.g. "10.0.0.0/8").
type NetIPNet net.IPNet

// String returns the CIDR notation of ipnet.
func (ipnet *NetIPNet) String() string {
	return (*net.IPNet)(ipnet).String()
}

// MarshalJSON returns the JSON representation of the IPNet: a quoted
// CIDR string.
func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) {
	return json.Marshal(ipnet.String())
}

// UnmarshalJSON sets the IPNet from a byte array of JSON holding a CIDR
// string. On any error the receiver is left unmodified.
func (ipnet *NetIPNet) UnmarshalJSON(b []byte) error {
	var cidrStr string
	if err := json.Unmarshal(b, &cidrStr); err != nil {
		return err
	}
	_, cidr, err := net.ParseCIDR(cidrStr)
	if err != nil {
		return err
	}
	*ipnet = NetIPNet(*cidr)
	return nil
}
||||||
|
|
||||||
|
// IndexInfo contains information about a registry
//
// RepositoryInfo Examples:
// {
//   "Index" : {
//     "Name" : "docker.io",
//     "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"],
//     "Secure" : true,
//     "Official" : true,
//   },
//   "RemoteName" : "library/debian",
//   "LocalName" : "debian",
//   "CanonicalName" : "docker.io/debian"
//   "Official" : true,
// }
//
// {
//   "Index" : {
//     "Name" : "127.0.0.1:5000",
//     "Mirrors" : [],
//     "Secure" : false,
//     "Official" : false,
//   },
//   "RemoteName" : "user/repo",
//   "LocalName" : "127.0.0.1:5000/user/repo",
//   "CanonicalName" : "127.0.0.1:5000/user/repo",
//   "Official" : false,
// }
type IndexInfo struct {
	// Name is the name of the registry, such as "docker.io"
	Name string
	// Mirrors is a list of mirrors, expressed as URIs
	Mirrors []string
	// Secure is set to false if the registry is part of the list of
	// insecure registries. Insecure registries accept HTTP and/or accept
	// HTTPS with certificates from unknown CAs.
	Secure bool
	// Official indicates whether this is an official registry
	Official bool
}
||||||
|
|
||||||
|
// SearchResult describes a search result returned from a registry
type SearchResult struct {
	// StarCount indicates the number of stars this repository has
	StarCount int `json:"star_count"`
	// IsOfficial is true if the result is from an official repository.
	IsOfficial bool `json:"is_official"`
	// Name is the name of the repository
	Name string `json:"name"`
	// IsAutomated indicates whether the result is automated
	IsAutomated bool `json:"is_automated"`
	// Description is a textual description of the repository
	Description string `json:"description"`
}
||||||
|
|
||||||
|
// SearchResults lists a collection search results returned from a registry
type SearchResults struct {
	// Query contains the query string that generated the search results
	Query string `json:"query"`
	// NumResults indicates the number of results the query returned
	NumResults int `json:"num_results"`
	// Results is a slice containing the actual results for the search
	Results []SearchResult `json:"results"`
}
@ -0,0 +1,93 @@ |
|||||||
|
package types

// NOTE(review): vendored seccomp profile types — keep in sync with upstream.

// Seccomp represents the config for a seccomp profile for syscall restriction.
type Seccomp struct {
	DefaultAction Action `json:"defaultAction"`
	// Architectures is kept to maintain backward compatibility with the old
	// seccomp profile.
	Architectures []Arch         `json:"architectures,omitempty"`
	ArchMap       []Architecture `json:"archMap,omitempty"`
	Syscalls      []*Syscall     `json:"syscalls"`
}

// Architecture is used to represent a specific architecture
// and its sub-architectures
type Architecture struct {
	Arch      Arch   `json:"architecture"`
	SubArches []Arch `json:"subArchitectures"`
}

// Arch used for architectures
type Arch string

// Additional architectures permitted to be used for system calls
// By default only the native architecture of the kernel is permitted
const (
	ArchX86         Arch = "SCMP_ARCH_X86"
	ArchX86_64      Arch = "SCMP_ARCH_X86_64"
	ArchX32         Arch = "SCMP_ARCH_X32"
	ArchARM         Arch = "SCMP_ARCH_ARM"
	ArchAARCH64     Arch = "SCMP_ARCH_AARCH64"
	ArchMIPS        Arch = "SCMP_ARCH_MIPS"
	ArchMIPS64      Arch = "SCMP_ARCH_MIPS64"
	ArchMIPS64N32   Arch = "SCMP_ARCH_MIPS64N32"
	ArchMIPSEL      Arch = "SCMP_ARCH_MIPSEL"
	ArchMIPSEL64    Arch = "SCMP_ARCH_MIPSEL64"
	ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32"
	ArchPPC         Arch = "SCMP_ARCH_PPC"
	ArchPPC64       Arch = "SCMP_ARCH_PPC64"
	ArchPPC64LE     Arch = "SCMP_ARCH_PPC64LE"
	ArchS390        Arch = "SCMP_ARCH_S390"
	ArchS390X       Arch = "SCMP_ARCH_S390X"
)

// Action taken upon Seccomp rule match
type Action string

// Define actions for Seccomp rules
const (
	ActKill  Action = "SCMP_ACT_KILL"
	ActTrap  Action = "SCMP_ACT_TRAP"
	ActErrno Action = "SCMP_ACT_ERRNO"
	ActTrace Action = "SCMP_ACT_TRACE"
	ActAllow Action = "SCMP_ACT_ALLOW"
)

// Operator used to match syscall arguments in Seccomp
type Operator string

// Define operators for syscall arguments in Seccomp
const (
	OpNotEqual     Operator = "SCMP_CMP_NE"
	OpLessThan     Operator = "SCMP_CMP_LT"
	OpLessEqual    Operator = "SCMP_CMP_LE"
	OpEqualTo      Operator = "SCMP_CMP_EQ"
	OpGreaterEqual Operator = "SCMP_CMP_GE"
	OpGreaterThan  Operator = "SCMP_CMP_GT"
	OpMaskedEqual  Operator = "SCMP_CMP_MASKED_EQ"
)

// Arg used for matching specific syscall arguments in Seccomp
type Arg struct {
	Index    uint     `json:"index"`
	Value    uint64   `json:"value"`
	ValueTwo uint64   `json:"valueTwo"`
	Op       Operator `json:"op"`
}

// Filter is used to conditionally apply Seccomp rules
type Filter struct {
	Caps   []string `json:"caps,omitempty"`
	Arches []string `json:"arches,omitempty"`
}

// Syscall is used to match a group of syscalls in Seccomp
type Syscall struct {
	Name     string   `json:"name,omitempty"`
	Names    []string `json:"names,omitempty"`
	Action   Action   `json:"action"`
	Args     []*Arg   `json:"args"`
	Comment  string   `json:"comment"`
	Includes Filter   `json:"includes"`
	Excludes Filter   `json:"excludes"`
}
@ -0,0 +1,12 @@ |
|||||||
|
package types

// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// NOTE(review): vendored, generated code — regenerate rather than hand-edit.

// ServiceUpdateResponse service update response
// swagger:model ServiceUpdateResponse
type ServiceUpdateResponse struct {

	// Optional warning messages
	Warnings []string `json:"Warnings"`
}
@ -0,0 +1,178 @@ |
|||||||
|
// Package types is used for API stability in the types and response to the
|
||||||
|
// consumers of the API stats endpoint.
|
||||||
|
package types |
||||||
|
|
||||||
|
import "time" |
||||||
|
|
||||||
|
// ThrottlingData stores CPU throttling stats of one running container.
|
||||||
|
// Not used on Windows.
|
||||||
|
type ThrottlingData struct { |
||||||
|
// Number of periods with throttling active
|
||||||
|
Periods uint64 `json:"periods"` |
||||||
|
// Number of periods when the container hits its throttling limit.
|
||||||
|
ThrottledPeriods uint64 `json:"throttled_periods"` |
||||||
|
// Aggregate time the container was throttled for in nanoseconds.
|
||||||
|
ThrottledTime uint64 `json:"throttled_time"` |
||||||
|
} |
||||||
|
|
||||||
|
// CPUUsage stores All CPU stats aggregated since container inception.
|
||||||
|
type CPUUsage struct { |
||||||
|
// Total CPU time consumed.
|
||||||
|
// Units: nanoseconds (Linux)
|
||||||
|
// Units: 100's of nanoseconds (Windows)
|
||||||
|
TotalUsage uint64 `json:"total_usage"` |
||||||
|
|
||||||
|
// Total CPU time consumed per core (Linux). Not used on Windows.
|
||||||
|
// Units: nanoseconds.
|
||||||
|
PercpuUsage []uint64 `json:"percpu_usage,omitempty"` |
||||||
|
|
||||||
|
// Time spent by tasks of the cgroup in kernel mode (Linux).
|
||||||
|
// Time spent by all container processes in kernel mode (Windows).
|
||||||
|
// Units: nanoseconds (Linux).
|
||||||
|
// Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers.
|
||||||
|
UsageInKernelmode uint64 `json:"usage_in_kernelmode"` |
||||||
|
|
||||||
|
// Time spent by tasks of the cgroup in user mode (Linux).
|
||||||
|
// Time spent by all container processes in user mode (Windows).
|
||||||
|
// Units: nanoseconds (Linux).
|
||||||
|
// Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers
|
||||||
|
UsageInUsermode uint64 `json:"usage_in_usermode"` |
||||||
|
} |
||||||
|
|
||||||
|
// CPUStats aggregates and wraps all CPU related info of container
|
||||||
|
type CPUStats struct { |
||||||
|
// CPU Usage. Linux and Windows.
|
||||||
|
CPUUsage CPUUsage `json:"cpu_usage"` |
||||||
|
|
||||||
|
// System Usage. Linux only.
|
||||||
|
SystemUsage uint64 `json:"system_cpu_usage,omitempty"` |
||||||
|
|
||||||
|
// Throttling Data. Linux only.
|
||||||
|
ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` |
||||||
|
} |
||||||
|
|
||||||
|
// MemoryStats aggregates all memory stats since container inception on Linux.
|
||||||
|
// Windows returns stats for commit and private working set only.
|
||||||
|
type MemoryStats struct { |
||||||
|
// Linux Memory Stats
|
||||||
|
|
||||||
|
// current res_counter usage for memory
|
||||||
|
Usage uint64 `json:"usage,omitempty"` |
||||||
|
// maximum usage ever recorded.
|
||||||
|
MaxUsage uint64 `json:"max_usage,omitempty"` |
||||||
|
// TODO(vishh): Export these as stronger types.
|
||||||
|
// all the stats exported via memory.stat.
|
||||||
|
Stats map[string]uint64 `json:"stats,omitempty"` |
||||||
|
// number of times memory usage hits limits.
|
||||||
|
Failcnt uint64 `json:"failcnt,omitempty"` |
||||||
|
Limit uint64 `json:"limit,omitempty"` |
||||||
|
|
||||||
|
// Windows Memory Stats
|
||||||
|
// See https://technet.microsoft.com/en-us/magazine/ff382715.aspx
|
||||||
|
|
||||||
|
// committed bytes
|
||||||
|
Commit uint64 `json:"commitbytes,omitempty"` |
||||||
|
// peak committed bytes
|
||||||
|
CommitPeak uint64 `json:"commitpeakbytes,omitempty"` |
||||||
|
// private working set
|
||||||
|
PrivateWorkingSet uint64 `json:"privateworkingset,omitempty"` |
||||||
|
} |
||||||
|
|
||||||
|
// BlkioStatEntry is one small entity to store a piece of Blkio stats
|
||||||
|
// Not used on Windows.
|
||||||
|
type BlkioStatEntry struct { |
||||||
|
Major uint64 `json:"major"` |
||||||
|
Minor uint64 `json:"minor"` |
||||||
|
Op string `json:"op"` |
||||||
|
Value uint64 `json:"value"` |
||||||
|
} |
||||||
|
|
||||||
|
// BlkioStats stores all IO service stats for data read and write.
// This is a Linux specific structure as the differences between expressing
// block I/O on Windows and Linux are sufficiently significant to make
// little sense attempting to morph into a combined structure.
//
// NOTE: the "io_queue_recursive" tag (without "d") is the actual wire
// format used by the daemon; do not "correct" it.
type BlkioStats struct {
	// number of bytes transferred to and from the block device
	IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"`
	IoServicedRecursive     []BlkioStatEntry `json:"io_serviced_recursive"`
	IoQueuedRecursive       []BlkioStatEntry `json:"io_queue_recursive"`
	IoServiceTimeRecursive  []BlkioStatEntry `json:"io_service_time_recursive"`
	IoWaitTimeRecursive     []BlkioStatEntry `json:"io_wait_time_recursive"`
	IoMergedRecursive       []BlkioStatEntry `json:"io_merged_recursive"`
	IoTimeRecursive         []BlkioStatEntry `json:"io_time_recursive"`
	SectorsRecursive        []BlkioStatEntry `json:"sectors_recursive"`
}
||||||
|
|
||||||
|
// StorageStats is the disk I/O stats for read/write on Windows.
type StorageStats struct {
	ReadCountNormalized  uint64 `json:"read_count_normalized,omitempty"`  // number of read operations
	ReadSizeBytes        uint64 `json:"read_size_bytes,omitempty"`        // total bytes read
	WriteCountNormalized uint64 `json:"write_count_normalized,omitempty"` // number of write operations
	WriteSizeBytes       uint64 `json:"write_size_bytes,omitempty"`       // total bytes written
}
||||||
|
|
||||||
|
// NetworkStats aggregates the network stats of one container.
type NetworkStats struct {
	// Bytes received. Windows and Linux.
	RxBytes uint64 `json:"rx_bytes"`
	// Packets received. Windows and Linux.
	RxPackets uint64 `json:"rx_packets"`
	// Received errors. Not used on Windows. Note that we don't `omitempty` this
	// field as it is expected in the >=v1.21 API stats structure.
	RxErrors uint64 `json:"rx_errors"`
	// Incoming packets dropped. Windows and Linux.
	RxDropped uint64 `json:"rx_dropped"`
	// Bytes sent. Windows and Linux.
	TxBytes uint64 `json:"tx_bytes"`
	// Packets sent. Windows and Linux.
	TxPackets uint64 `json:"tx_packets"`
	// Sent errors. Not used on Windows. Note that we don't `omitempty` this
	// field as it is expected in the >=v1.21 API stats structure.
	TxErrors uint64 `json:"tx_errors"`
	// Outgoing packets dropped. Windows and Linux.
	TxDropped uint64 `json:"tx_dropped"`
	// Endpoint ID. Not used on Linux.
	EndpointID string `json:"endpoint_id,omitempty"`
	// Instance ID. Not used on Linux.
	InstanceID string `json:"instance_id,omitempty"`
}
||||||
|
|
||||||
|
// PidsStats contains the stats of a container's pids.
type PidsStats struct {
	// Current is the number of pids in the cgroup.
	Current uint64 `json:"current,omitempty"`
	// Limit is the hard limit on the number of pids in the cgroup.
	// A "Limit" of 0 means that there is no limit.
	Limit uint64 `json:"limit,omitempty"`
}
||||||
|
|
||||||
|
// Stats is the ultimate struct aggregating all types of stats of one container.
type Stats struct {
	// Common stats

	Read    time.Time `json:"read"`    // time at which this sample was taken
	PreRead time.Time `json:"preread"` // time at which the previous sample was taken

	// Linux specific stats, not populated on Windows.
	PidsStats  PidsStats  `json:"pids_stats,omitempty"`
	BlkioStats BlkioStats `json:"blkio_stats,omitempty"`

	// Windows specific stats, not populated on Linux.
	NumProcs     uint32       `json:"num_procs"`
	StorageStats StorageStats `json:"storage_stats,omitempty"`

	// Shared stats
	CPUStats    CPUStats    `json:"cpu_stats,omitempty"`
	PreCPUStats CPUStats    `json:"precpu_stats,omitempty"` // "Pre"="Previous"
	MemoryStats MemoryStats `json:"memory_stats,omitempty"`
}
||||||
|
|
||||||
|
// StatsJSON extends Stats with the container name/ID and the per-interface
// network stats introduced with API version >=1.21.
type StatsJSON struct {
	Stats

	Name string `json:"name,omitempty"` // container name
	ID   string `json:"id,omitempty"`   // container ID

	// Networks request version >=1.21
	Networks map[string]NetworkStats `json:"networks,omitempty"`
}
@ -0,0 +1,30 @@ |
|||||||
|
package strslice |
||||||
|
|
||||||
|
import "encoding/json" |
||||||
|
|
||||||
|
// StrSlice represents a string or an array of strings.
// We need to override the json decoder to accept both options.
type StrSlice []string

// UnmarshalJSON decodes the byte slice whether it's a string or an array of
// strings. This method is needed to implement json.Unmarshaler.
func (e *StrSlice) UnmarshalJSON(b []byte) error {
	// An empty payload leaves the existing value untouched, so callers
	// may pre-populate the slice with defaults before decoding.
	if len(b) == 0 {
		return nil
	}

	parsed := make([]string, 0, 1)
	if arrErr := json.Unmarshal(b, &parsed); arrErr != nil {
		// Not a JSON array: fall back to decoding a single string and
		// wrapping it in a one-element slice.
		var single string
		if strErr := json.Unmarshal(b, &single); strErr != nil {
			return strErr
		}
		parsed = append(parsed, single)
	}

	*e = parsed
	return nil
}
@ -0,0 +1,27 @@ |
|||||||
|
package swarm |
||||||
|
|
||||||
|
import "time" |
||||||
|
|
||||||
|
// Version represents the internal object version.
type Version struct {
	Index uint64 `json:",omitempty"`
}

// Meta is a base object inherited by most of the other types.
type Meta struct {
	Version   Version   `json:",omitempty"`
	CreatedAt time.Time `json:",omitempty"`
	UpdatedAt time.Time `json:",omitempty"`
}

// Annotations represents how to describe an object.
type Annotations struct {
	Name   string            `json:",omitempty"`
	Labels map[string]string `json:",omitempty"`
}

// Driver represents a driver (network, logging).
type Driver struct {
	Name    string            `json:",omitempty"` // name of the driver
	Options map[string]string `json:",omitempty"` // driver-specific key/value options
}
@ -0,0 +1,46 @@ |
|||||||
|
package swarm |
||||||
|
|
||||||
|
import ( |
||||||
|
"time" |
||||||
|
|
||||||
|
"github.com/docker/docker/api/types/container" |
||||||
|
"github.com/docker/docker/api/types/mount" |
||||||
|
) |
||||||
|
|
||||||
|
// DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf)
// Detailed documentation is available in:
// http://man7.org/linux/man-pages/man5/resolv.conf.5.html
// `nameserver`, `search`, `options` have been supported.
// TODO: `domain` is not supported yet.
type DNSConfig struct {
	// Nameservers specifies the IP addresses of the name servers.
	Nameservers []string `json:",omitempty"`
	// Search specifies the search list for host-name lookup.
	Search []string `json:",omitempty"`
	// Options allows certain internal resolver variables to be modified.
	Options []string `json:",omitempty"`
}

// ContainerSpec represents the spec of a container.
type ContainerSpec struct {
	Image           string                  `json:",omitempty"` // image reference to run
	Labels          map[string]string       `json:",omitempty"` // user-defined key/value metadata
	Command         []string                `json:",omitempty"`
	Args            []string                `json:",omitempty"`
	Hostname        string                  `json:",omitempty"` // hostname inside the container
	Env             []string                `json:",omitempty"` // environment variables, KEY=value form
	Dir             string                  `json:",omitempty"` // working directory
	User            string                  `json:",omitempty"` // user to run as
	Groups          []string                `json:",omitempty"` // additional groups for the user
	TTY             bool                    `json:",omitempty"`
	OpenStdin       bool                    `json:",omitempty"`
	Mounts          []mount.Mount           `json:",omitempty"`
	StopGracePeriod *time.Duration          `json:",omitempty"`
	Healthcheck     *container.HealthConfig `json:",omitempty"`
	// The format of extra hosts on swarmkit is specified in:
	// http://man7.org/linux/man-pages/man5/hosts.5.html
	//    IP_address canonical_hostname [aliases...]
	Hosts     []string           `json:",omitempty"`
	DNSConfig *DNSConfig         `json:",omitempty"`
	Secrets   []*SecretReference `json:",omitempty"`
}
@ -0,0 +1,111 @@ |
|||||||
|
package swarm |
||||||
|
|
||||||
|
// Endpoint represents an endpoint.
type Endpoint struct {
	Spec       EndpointSpec        `json:",omitempty"`
	Ports      []PortConfig        `json:",omitempty"`
	VirtualIPs []EndpointVirtualIP `json:",omitempty"`
}

// EndpointSpec represents the spec of an endpoint.
type EndpointSpec struct {
	Mode  ResolutionMode `json:",omitempty"`
	Ports []PortConfig   `json:",omitempty"`
}

// ResolutionMode represents a resolution mode.
type ResolutionMode string

const (
	// ResolutionModeVIP is the "vip" resolution mode.
	ResolutionModeVIP ResolutionMode = "vip"
	// ResolutionModeDNSRR is the "dnsrr" (DNS round-robin) resolution mode.
	ResolutionModeDNSRR ResolutionMode = "dnsrr"
)

// PortConfig represents the config of a port.
type PortConfig struct {
	Name     string             `json:",omitempty"`
	Protocol PortConfigProtocol `json:",omitempty"`
	// TargetPort is the port inside the container.
	TargetPort uint32 `json:",omitempty"`
	// PublishedPort is the port on the swarm hosts.
	PublishedPort uint32 `json:",omitempty"`
	// PublishMode is the mode in which port is published.
	PublishMode PortConfigPublishMode `json:",omitempty"`
}

// PortConfigPublishMode represents the mode in which the port is to
// be published.
type PortConfigPublishMode string

const (
	// PortConfigPublishModeIngress is used for ports published
	// for ingress load balancing using routing mesh.
	PortConfigPublishModeIngress PortConfigPublishMode = "ingress"
	// PortConfigPublishModeHost is used for ports published
	// for direct host level access on the host where the task is running.
	PortConfigPublishModeHost PortConfigPublishMode = "host"
)

// PortConfigProtocol represents the protocol of a port.
type PortConfigProtocol string

const (
	// TODO(stevvooe): These should be used generally, not just for PortConfig.

	// PortConfigProtocolTCP is the "tcp" protocol.
	PortConfigProtocolTCP PortConfigProtocol = "tcp"
	// PortConfigProtocolUDP is the "udp" protocol.
	PortConfigProtocolUDP PortConfigProtocol = "udp"
)

// EndpointVirtualIP represents the virtual ip of a port.
type EndpointVirtualIP struct {
	NetworkID string `json:",omitempty"`
	Addr      string `json:",omitempty"`
}

// Network represents a network.
type Network struct {
	ID string
	Meta
	Spec        NetworkSpec  `json:",omitempty"`
	DriverState Driver       `json:",omitempty"`
	IPAMOptions *IPAMOptions `json:",omitempty"`
}

// NetworkSpec represents the spec of a network.
type NetworkSpec struct {
	Annotations
	DriverConfiguration *Driver      `json:",omitempty"`
	IPv6Enabled         bool         `json:",omitempty"`
	Internal            bool         `json:",omitempty"`
	Attachable          bool         `json:",omitempty"`
	IPAMOptions         *IPAMOptions `json:",omitempty"`
}

// NetworkAttachmentConfig represents the configuration of a network attachment.
type NetworkAttachmentConfig struct {
	Target  string   `json:",omitempty"`
	Aliases []string `json:",omitempty"`
}

// NetworkAttachment represents a network attachment.
type NetworkAttachment struct {
	Network   Network  `json:",omitempty"`
	Addresses []string `json:",omitempty"`
}

// IPAMOptions represents ipam options.
type IPAMOptions struct {
	Driver  Driver       `json:",omitempty"`
	Configs []IPAMConfig `json:",omitempty"`
}

// IPAMConfig represents ipam configuration.
type IPAMConfig struct {
	Subnet  string `json:",omitempty"`
	Range   string `json:",omitempty"`
	Gateway string `json:",omitempty"`
}
@ -0,0 +1,114 @@ |
|||||||
|
package swarm |
||||||
|
|
||||||
|
// Node represents a node.
type Node struct {
	ID string
	Meta
	// Spec defines the desired state of the node as specified by the user.
	// The system will honor this and will *never* modify it.
	Spec NodeSpec `json:",omitempty"`
	// Description encapsulates the properties of the Node as reported by the
	// agent.
	Description NodeDescription `json:",omitempty"`
	// Status provides the current status of the node, as seen by the manager.
	Status NodeStatus `json:",omitempty"`
	// ManagerStatus provides the current status of the node's manager
	// component, if the node is a manager.
	ManagerStatus *ManagerStatus `json:",omitempty"`
}

// NodeSpec represents the spec of a node.
type NodeSpec struct {
	Annotations
	Role         NodeRole         `json:",omitempty"`
	Availability NodeAvailability `json:",omitempty"`
}

// NodeRole represents the role of a node.
type NodeRole string

const (
	// NodeRoleWorker is the "worker" role.
	NodeRoleWorker NodeRole = "worker"
	// NodeRoleManager is the "manager" role.
	NodeRoleManager NodeRole = "manager"
)

// NodeAvailability represents the availability of a node.
type NodeAvailability string

const (
	// NodeAvailabilityActive is the "active" availability.
	NodeAvailabilityActive NodeAvailability = "active"
	// NodeAvailabilityPause is the "pause" availability.
	NodeAvailabilityPause NodeAvailability = "pause"
	// NodeAvailabilityDrain is the "drain" availability.
	NodeAvailabilityDrain NodeAvailability = "drain"
)

// NodeDescription represents the description of a node.
type NodeDescription struct {
	Hostname  string            `json:",omitempty"`
	Platform  Platform          `json:",omitempty"`
	Resources Resources         `json:",omitempty"`
	Engine    EngineDescription `json:",omitempty"`
}

// Platform represents the platform (Arch/OS).
type Platform struct {
	Architecture string `json:",omitempty"`
	OS           string `json:",omitempty"`
}

// EngineDescription represents the description of an engine.
type EngineDescription struct {
	EngineVersion string              `json:",omitempty"`
	Labels        map[string]string   `json:",omitempty"`
	Plugins       []PluginDescription `json:",omitempty"`
}

// PluginDescription represents the description of an engine plugin.
type PluginDescription struct {
	Type string `json:",omitempty"`
	Name string `json:",omitempty"`
}

// NodeStatus represents the status of a node.
type NodeStatus struct {
	State   NodeState `json:",omitempty"`
	Message string    `json:",omitempty"`
	Addr    string    `json:",omitempty"`
}

// Reachability represents the reachability of a node.
type Reachability string

const (
	// ReachabilityUnknown is the "unknown" reachability.
	ReachabilityUnknown Reachability = "unknown"
	// ReachabilityUnreachable is the "unreachable" reachability.
	ReachabilityUnreachable Reachability = "unreachable"
	// ReachabilityReachable is the "reachable" reachability.
	ReachabilityReachable Reachability = "reachable"
)

// ManagerStatus represents the status of a manager.
type ManagerStatus struct {
	Leader       bool         `json:",omitempty"`
	Reachability Reachability `json:",omitempty"`
	Addr         string       `json:",omitempty"`
}

// NodeState represents the state of a node.
type NodeState string

const (
	// NodeStateUnknown is the "unknown" node state.
	NodeStateUnknown NodeState = "unknown"
	// NodeStateDown is the "down" node state.
	NodeStateDown NodeState = "down"
	// NodeStateReady is the "ready" node state.
	NodeStateReady NodeState = "ready"
	// NodeStateDisconnected is the "disconnected" node state.
	NodeStateDisconnected NodeState = "disconnected"
)
@ -0,0 +1,31 @@ |
|||||||
|
package swarm |
||||||
|
|
||||||
|
import "os" |
||||||
|
|
||||||
|
// Secret represents a secret.
type Secret struct {
	ID string
	Meta
	Spec SecretSpec
}

// SecretSpec represents a secret specification from a secret in swarm.
type SecretSpec struct {
	Annotations
	// Data is the raw secret payload.
	Data []byte `json:",omitempty"`
}

// SecretReferenceFileTarget is a file target in a secret reference.
type SecretReferenceFileTarget struct {
	Name string      // name of the file inside the container
	UID  string      // owning user ID of the file
	GID  string      // owning group ID of the file
	Mode os.FileMode // file permission bits
}

// SecretReference is a reference to a secret in swarm.
type SecretReference struct {
	File       *SecretReferenceFileTarget
	SecretID   string
	SecretName string
}
@ -0,0 +1,105 @@ |
|||||||
|
package swarm |
||||||
|
|
||||||
|
import "time" |
||||||
|
|
||||||
|
// Service represents a service.
type Service struct {
	ID string
	Meta
	Spec         ServiceSpec  `json:",omitempty"`
	PreviousSpec *ServiceSpec `json:",omitempty"` // spec in effect before the most recent update
	Endpoint     Endpoint     `json:",omitempty"`
	UpdateStatus UpdateStatus `json:",omitempty"`
}

// ServiceSpec represents the spec of a service.
type ServiceSpec struct {
	Annotations

	// TaskTemplate defines how the service should construct new tasks when
	// orchestrating this service.
	TaskTemplate TaskSpec      `json:",omitempty"`
	Mode         ServiceMode   `json:",omitempty"`
	UpdateConfig *UpdateConfig `json:",omitempty"`

	// Networks field in ServiceSpec is deprecated. The
	// same field in TaskSpec should be used instead.
	// This field will be removed in a future release.
	Networks     []NetworkAttachmentConfig `json:",omitempty"`
	EndpointSpec *EndpointSpec             `json:",omitempty"`
}

// ServiceMode represents the mode of a service.
type ServiceMode struct {
	Replicated *ReplicatedService `json:",omitempty"`
	Global     *GlobalService     `json:",omitempty"`
}

// UpdateState is the state of a service update.
type UpdateState string

const (
	// UpdateStateUpdating is the updating state.
	UpdateStateUpdating UpdateState = "updating"
	// UpdateStatePaused is the paused state.
	UpdateStatePaused UpdateState = "paused"
	// UpdateStateCompleted is the completed state.
	UpdateStateCompleted UpdateState = "completed"
)

// UpdateStatus reports the status of a service update.
type UpdateStatus struct {
	State       UpdateState `json:",omitempty"`
	StartedAt   time.Time   `json:",omitempty"`
	CompletedAt time.Time   `json:",omitempty"`
	Message     string      `json:",omitempty"`
}

// ReplicatedService is a kind of ServiceMode.
type ReplicatedService struct {
	// Replicas is the desired number of replicas.
	Replicas *uint64 `json:",omitempty"`
}

// GlobalService is a kind of ServiceMode.
type GlobalService struct{}

const (
	// UpdateFailureActionPause is the "pause" update failure action.
	UpdateFailureActionPause = "pause"
	// UpdateFailureActionContinue is the "continue" update failure action.
	UpdateFailureActionContinue = "continue"
)

// UpdateConfig represents the update configuration.
type UpdateConfig struct {
	// Maximum number of tasks to be updated in one iteration.
	// 0 means unlimited parallelism.
	Parallelism uint64

	// Amount of time between updates.
	Delay time.Duration `json:",omitempty"`

	// FailureAction is the action to take when an update fails.
	FailureAction string `json:",omitempty"`

	// Monitor indicates how long to monitor a task for failure after it is
	// created. If the task fails by ending up in one of the states
	// REJECTED, COMPLETED, or FAILED, within Monitor from its creation,
	// this counts as a failure. If it fails after Monitor, it does not
	// count as a failure. If Monitor is unspecified, a default value will
	// be used.
	Monitor time.Duration `json:",omitempty"`

	// MaxFailureRatio is the fraction of tasks that may fail during
	// an update before the failure action is invoked. Any task created by
	// the current update which ends up in one of the states REJECTED,
	// COMPLETED or FAILED within Monitor from its creation counts as a
	// failure. The number of failures is divided by the number of tasks
	// being updated, and if this fraction is greater than
	// MaxFailureRatio, the failure action is invoked.
	//
	// If the failure action is CONTINUE, there is no effect.
	// If the failure action is PAUSE, no more tasks will be updated until
	// another update is started.
	MaxFailureRatio float32
}
@ -0,0 +1,197 @@ |
|||||||
|
package swarm |
||||||
|
|
||||||
|
import "time" |
||||||
|
|
||||||
|
// ClusterInfo represents info about the cluster for outputting in "info".
// It contains the same information as "Swarm", but without the JoinTokens.
type ClusterInfo struct {
	ID string
	Meta
	Spec Spec
}

// Swarm represents a swarm.
type Swarm struct {
	ClusterInfo
	JoinTokens JoinTokens
}

// JoinTokens contains the tokens workers and managers need to join the swarm.
type JoinTokens struct {
	// Worker is the join token workers may use to join the swarm.
	Worker string
	// Manager is the join token managers may use to join the swarm.
	Manager string
}

// Spec represents the spec of a swarm.
type Spec struct {
	Annotations

	Orchestration    OrchestrationConfig `json:",omitempty"`
	Raft             RaftConfig          `json:",omitempty"`
	Dispatcher       DispatcherConfig    `json:",omitempty"`
	CAConfig         CAConfig            `json:",omitempty"`
	TaskDefaults     TaskDefaults        `json:",omitempty"`
	EncryptionConfig EncryptionConfig    `json:",omitempty"`
}

// OrchestrationConfig represents orchestration configuration.
type OrchestrationConfig struct {
	// TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or
	// node. If negative, never remove completed or failed tasks.
	TaskHistoryRetentionLimit *int64 `json:",omitempty"`
}

// TaskDefaults parameterizes cluster-level task creation with default values.
type TaskDefaults struct {
	// LogDriver selects the log driver to use for tasks created in the
	// orchestrator if unspecified by a service.
	//
	// Updating this value will only have an affect on new tasks. Old tasks
	// will continue use their previously configured log driver until
	// recreated.
	LogDriver *Driver `json:",omitempty"`
}

// EncryptionConfig controls at-rest encryption of data and keys.
type EncryptionConfig struct {
	// AutoLockManagers specifies whether or not managers TLS keys and raft data
	// should be encrypted at rest in such a way that they must be unlocked
	// before the manager node starts up again.
	AutoLockManagers bool
}

// RaftConfig represents raft configuration.
type RaftConfig struct {
	// SnapshotInterval is the number of log entries between snapshots.
	SnapshotInterval uint64 `json:",omitempty"`

	// KeepOldSnapshots is the number of snapshots to keep beyond the
	// current snapshot.
	KeepOldSnapshots *uint64 `json:",omitempty"`

	// LogEntriesForSlowFollowers is the number of log entries to keep
	// around to sync up slow followers after a snapshot is created.
	LogEntriesForSlowFollowers uint64 `json:",omitempty"`

	// ElectionTick is the number of ticks that a follower will wait for a message
	// from the leader before becoming a candidate and starting an election.
	// ElectionTick must be greater than HeartbeatTick.
	//
	// A tick currently defaults to one second, so these translate directly to
	// seconds currently, but this is NOT guaranteed.
	ElectionTick int

	// HeartbeatTick is the number of ticks between heartbeats. Every
	// HeartbeatTick ticks, the leader will send a heartbeat to the
	// followers.
	//
	// A tick currently defaults to one second, so these translate directly to
	// seconds currently, but this is NOT guaranteed.
	HeartbeatTick int
}

// DispatcherConfig represents dispatcher configuration.
type DispatcherConfig struct {
	// HeartbeatPeriod defines how often agent should send heartbeats to
	// dispatcher.
	HeartbeatPeriod time.Duration `json:",omitempty"`
}

// CAConfig represents CA configuration.
type CAConfig struct {
	// NodeCertExpiry is the duration certificates should be issued for.
	NodeCertExpiry time.Duration `json:",omitempty"`

	// ExternalCAs is a list of CAs to which a manager node will make
	// certificate signing requests for node certificates.
	ExternalCAs []*ExternalCA `json:",omitempty"`
}

// ExternalCAProtocol represents type of external CA.
type ExternalCAProtocol string

// ExternalCAProtocolCFSSL is the "cfssl" external CA protocol.
const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl"

// ExternalCA defines external CA to be used by the cluster.
type ExternalCA struct {
	// Protocol is the protocol used by this external CA.
	Protocol ExternalCAProtocol

	// URL is the URL where the external CA can be reached.
	URL string

	// Options is a set of additional key/value pairs whose interpretation
	// depends on the specified CA type.
	Options map[string]string `json:",omitempty"`
}

// InitRequest is the request used to init a swarm.
type InitRequest struct {
	ListenAddr       string
	AdvertiseAddr    string
	ForceNewCluster  bool
	Spec             Spec
	AutoLockManagers bool
}

// JoinRequest is the request used to join a swarm.
type JoinRequest struct {
	ListenAddr    string
	AdvertiseAddr string
	RemoteAddrs   []string
	JoinToken     string // accept by secret
}

// UnlockRequest is the request used to unlock a swarm.
type UnlockRequest struct {
	// UnlockKey is the unlock key in ASCII-armored format.
	UnlockKey string
}

// LocalNodeState represents the state of the local node.
type LocalNodeState string

const (
	// LocalNodeStateInactive is the "inactive" local node state.
	LocalNodeStateInactive LocalNodeState = "inactive"
	// LocalNodeStatePending is the "pending" local node state.
	LocalNodeStatePending LocalNodeState = "pending"
	// LocalNodeStateActive is the "active" local node state.
	LocalNodeStateActive LocalNodeState = "active"
	// LocalNodeStateError is the "error" local node state.
	LocalNodeStateError LocalNodeState = "error"
	// LocalNodeStateLocked is the "locked" local node state.
	LocalNodeStateLocked LocalNodeState = "locked"
)

// Info represents generic information about swarm.
type Info struct {
	NodeID   string
	NodeAddr string

	LocalNodeState   LocalNodeState
	ControlAvailable bool
	Error            string

	RemoteManagers []Peer
	Nodes          int
	Managers       int

	Cluster ClusterInfo
}

// Peer represents a peer.
type Peer struct {
	NodeID string
	Addr   string
}

// UpdateFlags contains flags for SwarmUpdate.
type UpdateFlags struct {
	RotateWorkerToken      bool
	RotateManagerToken     bool
	RotateManagerUnlockKey bool
}
@ -0,0 +1,128 @@ |
|||||||
|
package swarm |
||||||
|
|
||||||
|
import "time" |
||||||
|
|
||||||
|
// TaskState represents the state of a task.
type TaskState string

const (
	// TaskStateNew is the "new" task state.
	TaskStateNew TaskState = "new"
	// TaskStateAllocated is the "allocated" task state.
	TaskStateAllocated TaskState = "allocated"
	// TaskStatePending is the "pending" task state.
	TaskStatePending TaskState = "pending"
	// TaskStateAssigned is the "assigned" task state.
	TaskStateAssigned TaskState = "assigned"
	// TaskStateAccepted is the "accepted" task state.
	TaskStateAccepted TaskState = "accepted"
	// TaskStatePreparing is the "preparing" task state.
	TaskStatePreparing TaskState = "preparing"
	// TaskStateReady is the "ready" task state.
	TaskStateReady TaskState = "ready"
	// TaskStateStarting is the "starting" task state.
	TaskStateStarting TaskState = "starting"
	// TaskStateRunning is the "running" task state.
	TaskStateRunning TaskState = "running"
	// TaskStateComplete is the "complete" task state.
	TaskStateComplete TaskState = "complete"
	// TaskStateShutdown is the "shutdown" task state.
	TaskStateShutdown TaskState = "shutdown"
	// TaskStateFailed is the "failed" task state.
	TaskStateFailed TaskState = "failed"
	// TaskStateRejected is the "rejected" task state.
	TaskStateRejected TaskState = "rejected"
)

// Task represents a task.
type Task struct {
	ID string
	Meta
	Annotations

	Spec                TaskSpec            `json:",omitempty"`
	ServiceID           string              `json:",omitempty"`
	Slot                int                 `json:",omitempty"`
	NodeID              string              `json:",omitempty"`
	Status              TaskStatus          `json:",omitempty"`
	DesiredState        TaskState           `json:",omitempty"`
	NetworksAttachments []NetworkAttachment `json:",omitempty"`
}

// TaskSpec represents the spec of a task.
type TaskSpec struct {
	ContainerSpec ContainerSpec             `json:",omitempty"`
	Resources     *ResourceRequirements     `json:",omitempty"`
	RestartPolicy *RestartPolicy            `json:",omitempty"`
	Placement     *Placement                `json:",omitempty"`
	Networks      []NetworkAttachmentConfig `json:",omitempty"`

	// LogDriver specifies the LogDriver to use for tasks created from this
	// spec. If not present, the one on cluster default on swarm.Spec will be
	// used, finally falling back to the engine default if not specified.
	LogDriver *Driver `json:",omitempty"`

	// ForceUpdate is a counter that triggers an update even if no relevant
	// parameters have been changed.
	ForceUpdate uint64
}

// Resources represents resources (CPU/Memory).
type Resources struct {
	NanoCPUs    int64 `json:",omitempty"` // CPU limit in units of 10^-9 CPU shares
	MemoryBytes int64 `json:",omitempty"` // memory limit in bytes
}

// ResourceRequirements represents resources requirements.
type ResourceRequirements struct {
	Limits       *Resources `json:",omitempty"`
	Reservations *Resources `json:",omitempty"`
}
||||||
|
|
||||||
|
// Placement represents orchestration parameters.
|
||||||
|
type Placement struct { |
||||||
|
Constraints []string `json:",omitempty"` |
||||||
|
} |
||||||
|
|
||||||
|
// RestartPolicy represents the restart policy.
|
||||||
|
type RestartPolicy struct { |
||||||
|
Condition RestartPolicyCondition `json:",omitempty"` |
||||||
|
Delay *time.Duration `json:",omitempty"` |
||||||
|
MaxAttempts *uint64 `json:",omitempty"` |
||||||
|
Window *time.Duration `json:",omitempty"` |
||||||
|
} |
||||||
|
|
||||||
|
// RestartPolicyCondition represents when to restart.
|
||||||
|
type RestartPolicyCondition string |
||||||
|
|
||||||
|
const ( |
||||||
|
// RestartPolicyConditionNone NONE
|
||||||
|
RestartPolicyConditionNone RestartPolicyCondition = "none" |
||||||
|
// RestartPolicyConditionOnFailure ON_FAILURE
|
||||||
|
RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure" |
||||||
|
// RestartPolicyConditionAny ANY
|
||||||
|
RestartPolicyConditionAny RestartPolicyCondition = "any" |
||||||
|
) |
||||||
|
|
||||||
|
// TaskStatus represents the status of a task.
|
||||||
|
type TaskStatus struct { |
||||||
|
Timestamp time.Time `json:",omitempty"` |
||||||
|
State TaskState `json:",omitempty"` |
||||||
|
Message string `json:",omitempty"` |
||||||
|
Err string `json:",omitempty"` |
||||||
|
ContainerStatus ContainerStatus `json:",omitempty"` |
||||||
|
PortStatus PortStatus `json:",omitempty"` |
||||||
|
} |
||||||
|
|
||||||
|
// ContainerStatus represents the status of a container.
|
||||||
|
type ContainerStatus struct { |
||||||
|
ContainerID string `json:",omitempty"` |
||||||
|
PID int `json:",omitempty"` |
||||||
|
ExitCode int `json:",omitempty"` |
||||||
|
} |
||||||
|
|
||||||
|
// PortStatus represents the port status of a task's host ports whose
|
||||||
|
// service has published host ports
|
||||||
|
type PortStatus struct { |
||||||
|
Ports []PortConfig `json:",omitempty"` |
||||||
|
} |
@ -0,0 +1,12 @@ |
|||||||
|
package time |
||||||
|
|
||||||
|
import ( |
||||||
|
"strconv" |
||||||
|
"time" |
||||||
|
) |
||||||
|
|
||||||
|
// DurationToSecondsString converts the specified duration to the number
|
||||||
|
// seconds it represents, formatted as a string.
|
||||||
|
func DurationToSecondsString(duration time.Duration) string { |
||||||
|
return strconv.FormatFloat(duration.Seconds(), 'f', 0, 64) |
||||||
|
} |
@ -0,0 +1,124 @@ |
|||||||
|
package time |
||||||
|
|
||||||
|
import ( |
||||||
|
"fmt" |
||||||
|
"math" |
||||||
|
"strconv" |
||||||
|
"strings" |
||||||
|
"time" |
||||||
|
) |
||||||
|
|
||||||
|
// These are additional predefined layouts for use in Time.Format and Time.Parse
// with --since and --until parameters for `docker logs` and `docker events`.
// Layout strings use the standard library's reference time
// (Mon Jan 2 15:04:05 MST 2006).
const (
	rFC3339Local     = "2006-01-02T15:04:05"           // RFC3339 with local timezone
	rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone
	dateWithZone     = "2006-01-02Z07:00"              // RFC3339 with time at 00:00:00
	dateLocal        = "2006-01-02"                    // RFC3339 with local timezone and time at 00:00:00
)
||||||
|
|
||||||
|
// GetTimestamp tries to parse given string as golang duration,
// then RFC3339 time and finally as a Unix timestamp. If
// any of these were successful, it returns a Unix timestamp
// as string otherwise returns the given value back.
// In case of duration input, the returned timestamp is computed
// as the given reference time minus the amount of the duration.
func GetTimestamp(value string, reference time.Time) (string, error) {
	// Duration form first, e.g. "10m". "0" is excluded because it parses as
	// a zero duration but callers treat it as the Unix timestamp 0.
	if d, err := time.ParseDuration(value); value != "0" && err == nil {
		return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil
	}

	var format string
	var parseInLocation bool

	// If the string has a Z, a +, or three dashes, an explicit zone offset is
	// present (a plain date only has two dashes; the third can only come from
	// a negative "-07:00" offset), so use time.Parse; otherwise interpret the
	// value in the reference time's location via time.ParseInLocation.
	parseInLocation = !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)

	if strings.Contains(value, ".") {
		// Fractional seconds present: pick the nanosecond-capable layout.
		if parseInLocation {
			format = rFC3339NanoLocal
		} else {
			format = time.RFC3339Nano
		}
	} else if strings.Contains(value, "T") {
		// Date+time form. Choose a layout based on how much of the time
		// portion (hours / hours:minutes / full) was supplied.
		// we want the number of colons in the T portion of the timestamp
		tcolons := strings.Count(value, ":")
		// if parseInLocation is off and we have a +/- zone offset (not Z) then
		// there will be an extra colon in the input for the tz offset subtract that
		// colon from the tcolons count
		if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 {
			tcolons--
		}
		if parseInLocation {
			switch tcolons {
			case 0:
				format = "2006-01-02T15"
			case 1:
				format = "2006-01-02T15:04"
			default:
				format = rFC3339Local
			}
		} else {
			switch tcolons {
			case 0:
				format = "2006-01-02T15Z07:00"
			case 1:
				format = "2006-01-02T15:04Z07:00"
			default:
				format = time.RFC3339
			}
		}
	} else if parseInLocation {
		// Bare date, no zone information.
		format = dateLocal
	} else {
		// Bare date with an explicit zone offset.
		format = dateWithZone
	}

	var t time.Time
	var err error

	if parseInLocation {
		// Anchor zone-less input in the reference time's fixed zone.
		t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone()))
	} else {
		t, err = time.Parse(format, value)
	}

	if err != nil {
		// if there is a `-` then its an RFC3339 like timestamp otherwise assume unixtimestamp
		if strings.Contains(value, "-") {
			return "", err // was probably an RFC3339 like timestamp but the parser failed with an error
		}
		return value, nil // unixtimestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server)
	}

	// Normalized output: "<unix-seconds>.<9-digit nanoseconds>".
	return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil
}
||||||
|
|
||||||
|
// ParseTimestamps returns seconds and nanoseconds from a timestamp that has
// the format "%d.%09d" (time.Unix(), int64(time.Nanosecond())).
// If the incoming nanosecond portion is longer or shorter than 9 digits it is
// scaled to nanoseconds; the resulting pair is suitable for time.Unix.
// For example:
//   seconds, nanoseconds, err := ParseTimestamps("1136073600.000000001", 0)
//   if err == nil { since := time.Unix(seconds, nanoseconds) }
// An empty value yields (def, 0, nil).
func ParseTimestamps(value string, def int64) (int64, int64, error) {
	if value == "" {
		return def, 0, nil
	}

	parts := strings.SplitN(value, ".", 2)

	seconds, err := strconv.ParseInt(parts[0], 10, 64)
	if err != nil {
		return seconds, 0, err
	}

	// No fractional part at all: nanoseconds default to zero.
	if len(parts) != 2 {
		return seconds, 0, nil
	}

	nanos, err := strconv.ParseInt(parts[1], 10, 64)
	if err != nil {
		return seconds, nanos, err
	}

	// Scale the fractional digits to exactly nine places (nanoseconds),
	// e.g. ".5" -> 500000000; already-9-digit input is unchanged.
	nanos = int64(float64(nanos) * math.Pow(float64(10), float64(9-len(parts[1]))))
	return seconds, nanos, nil
}
@ -0,0 +1,549 @@ |
|||||||
|
package types |
||||||
|
|
||||||
|
import ( |
||||||
|
"errors" |
||||||
|
"fmt" |
||||||
|
"io" |
||||||
|
"os" |
||||||
|
"strings" |
||||||
|
"time" |
||||||
|
|
||||||
|
"github.com/docker/docker/api/types/container" |
||||||
|
"github.com/docker/docker/api/types/filters" |
||||||
|
"github.com/docker/docker/api/types/mount" |
||||||
|
"github.com/docker/docker/api/types/network" |
||||||
|
"github.com/docker/docker/api/types/registry" |
||||||
|
"github.com/docker/docker/api/types/swarm" |
||||||
|
"github.com/docker/go-connections/nat" |
||||||
|
) |
||||||
|
|
||||||
|
// ContainerChange contains response of Engine API:
// GET "/containers/{name:.*}/changes"
type ContainerChange struct {
	Kind int // change kind code from the daemon — see engine filesystem-change docs; TODO confirm mapping
	Path string
}

// ImageHistory contains response of Engine API:
// GET "/images/{name:.*}/history"
type ImageHistory struct {
	ID        string `json:"Id"`
	Created   int64 // Unix timestamp — presumably seconds; verify against daemon output
	CreatedBy string
	Tags      []string
	Size      int64
	Comment   string
}

// ImageDelete contains response of Engine API:
// DELETE "/images/{name:.*}"
type ImageDelete struct {
	Untagged string `json:",omitempty"`
	Deleted  string `json:",omitempty"`
}

// GraphDriverData returns Image's graph driver config info
// when calling inspect command.
type GraphDriverData struct {
	Name string
	Data map[string]string
}

// RootFS returns Image's RootFS description including the layer IDs.
type RootFS struct {
	Type      string
	Layers    []string `json:",omitempty"`
	BaseLayer string   `json:",omitempty"`
}

// ImageInspect contains response of Engine API:
// GET "/images/{name:.*}/json"
type ImageInspect struct {
	ID              string `json:"Id"`
	RepoTags        []string
	RepoDigests     []string
	Parent          string
	Comment         string
	Created         string
	Container       string
	ContainerConfig *container.Config
	DockerVersion   string
	Author          string
	Config          *container.Config
	Architecture    string
	Os              string
	OsVersion       string `json:",omitempty"`
	Size            int64
	VirtualSize     int64
	GraphDriver     GraphDriverData
	RootFS          RootFS
}

// Container contains response of Engine API:
// GET "/containers/json"
type Container struct {
	ID         string `json:"Id"`
	Names      []string
	Image      string
	ImageID    string
	Command    string
	Created    int64
	Ports      []Port
	SizeRw     int64 `json:",omitempty"`
	SizeRootFs int64 `json:",omitempty"`
	Labels     map[string]string
	State      string
	Status     string
	// HostConfig is an anonymous sub-struct carrying only the network mode.
	HostConfig struct {
		NetworkMode string `json:",omitempty"`
	}
	NetworkSettings *SummaryNetworkSettings
	Mounts          []MountPoint
}

// CopyConfig contains request body of Engine API:
// POST "/containers/"+containerID+"/copy"
type CopyConfig struct {
	Resource string
}

// ContainerPathStat is used to encode the header from
// GET "/containers/{name:.*}/archive"
// "Name" is the file or directory name.
type ContainerPathStat struct {
	Name       string      `json:"name"`
	Size       int64       `json:"size"`
	Mode       os.FileMode `json:"mode"`
	Mtime      time.Time   `json:"mtime"`
	LinkTarget string      `json:"linkTarget"`
}

// ContainerStats contains response of Engine API:
// GET "/stats"
type ContainerStats struct {
	Body   io.ReadCloser `json:"body"` // caller is responsible for closing the stream
	OSType string        `json:"ostype"`
}

// ContainerProcessList contains response of Engine API:
// GET "/containers/{name:.*}/top"
type ContainerProcessList struct {
	Processes [][]string
	Titles    []string
}

// Ping contains response of Engine API:
// GET "/_ping"
type Ping struct {
	APIVersion   string
	Experimental bool
}

// Version contains response of Engine API:
// GET "/version"
type Version struct {
	Version       string
	APIVersion    string `json:"ApiVersion"`
	MinAPIVersion string `json:"MinAPIVersion,omitempty"`
	GitCommit     string
	GoVersion     string
	Os            string
	Arch          string
	KernelVersion string `json:",omitempty"`
	Experimental  bool   `json:",omitempty"`
	BuildTime     string `json:",omitempty"`
}

// Commit records an external tool's actual commit ID alongside the
// one expected by dockerd, as set at build time.
type Commit struct {
	ID       string
	Expected string
}

// Info contains response of Engine API:
// GET "/info"
type Info struct {
	ID                 string
	Containers         int
	ContainersRunning  int
	ContainersPaused   int
	ContainersStopped  int
	Images             int
	Driver             string
	DriverStatus       [][2]string
	SystemStatus       [][2]string
	Plugins            PluginsInfo
	MemoryLimit        bool
	SwapLimit          bool
	KernelMemory       bool
	CPUCfsPeriod       bool `json:"CpuCfsPeriod"`
	CPUCfsQuota        bool `json:"CpuCfsQuota"`
	CPUShares          bool
	CPUSet             bool
	IPv4Forwarding     bool
	BridgeNfIptables   bool
	BridgeNfIP6tables  bool `json:"BridgeNfIp6tables"`
	Debug              bool
	NFd                int
	OomKillDisable     bool
	NGoroutines        int
	SystemTime         string
	LoggingDriver      string
	CgroupDriver       string
	NEventsListener    int
	KernelVersion      string
	OperatingSystem    string
	OSType             string
	Architecture       string
	IndexServerAddress string
	RegistryConfig     *registry.ServiceConfig
	NCPU               int
	MemTotal           int64
	DockerRootDir      string
	HTTPProxy          string `json:"HttpProxy"`
	HTTPSProxy         string `json:"HttpsProxy"`
	NoProxy            string
	Name               string
	Labels             []string
	ExperimentalBuild  bool
	ServerVersion      string
	ClusterStore       string
	ClusterAdvertise   string
	Runtimes           map[string]Runtime
	DefaultRuntime     string
	Swarm              swarm.Info
	// LiveRestoreEnabled determines whether containers should be kept
	// running when the daemon is shutdown or upon daemon start if
	// running containers are detected
	LiveRestoreEnabled bool
	Isolation          container.Isolation
	InitBinary         string
	ContainerdCommit   Commit
	RuncCommit         Commit
	InitCommit         Commit
	SecurityOptions    []string
}
||||||
|
|
||||||
|
// KeyValue holds a key/value pair.
type KeyValue struct {
	Key, Value string
}

// SecurityOpt contains the name and options of a security option.
type SecurityOpt struct {
	Name    string
	Options []KeyValue
}

// DecodeSecurityOptions decodes a security options string slice to a type safe
// SecurityOpt.
//
// Each entry is either a bare option name (as emitted by a pre-1.13 daemon)
// or a comma-separated list of key=value pairs; the "name" key selects the
// option's name and every other pair is collected into Options.
func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) {
	decoded := []SecurityOpt{}
	for _, raw := range opts {
		// support output from a < 1.13 docker daemon
		if !strings.Contains(raw, "=") {
			decoded = append(decoded, SecurityOpt{Name: raw})
			continue
		}
		var current SecurityOpt
		for _, pair := range strings.Split(raw, ",") {
			kv := strings.SplitN(pair, "=", 2)
			if len(kv) != 2 {
				return nil, fmt.Errorf("invalid security option %q", pair)
			}
			if kv[0] == "" || kv[1] == "" {
				return nil, errors.New("invalid empty security option")
			}
			if kv[0] == "name" {
				current.Name = kv[1]
				continue
			}
			current.Options = append(current.Options, KeyValue{Key: kv[0], Value: kv[1]})
		}
		decoded = append(decoded, current)
	}
	return decoded, nil
}
||||||
|
|
||||||
|
// PluginsInfo is a temp struct holding Plugins name
// registered with docker daemon. It is used by Info struct.
type PluginsInfo struct {
	// List of Volume plugins registered
	Volume []string
	// List of Network plugins registered
	Network []string
	// List of Authorization plugins registered
	Authorization []string
}

// ExecStartCheck is a temp struct used by execStart.
// Config fields is part of ExecConfig in runconfig package.
type ExecStartCheck struct {
	// ExecStart will first check if it's detached
	Detach bool
	// Check if there's a tty
	Tty bool
}

// HealthcheckResult stores information about a single run of a healthcheck probe.
type HealthcheckResult struct {
	Start    time.Time // Start is the time this check started
	End      time.Time // End is the time this check ended
	ExitCode int       // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe
	Output   string    // Output from last check
}

// Health states reported in Health.Status.
const (
	NoHealthcheck = "none"      // Indicates there is no healthcheck
	Starting      = "starting"  // Starting indicates that the container is not yet ready
	Healthy       = "healthy"   // Healthy indicates that the container is running correctly
	Unhealthy     = "unhealthy" // Unhealthy indicates that the container has a problem
)

// Health stores information about the container's healthcheck results.
type Health struct {
	Status        string               // Status is one of Starting, Healthy or Unhealthy
	FailingStreak int                  // FailingStreak is the number of consecutive failures
	Log           []*HealthcheckResult // Log contains the last few results (oldest first)
}

// ContainerState stores container's running state.
// It's part of ContainerJSONBase and will be returned by the "inspect" command.
type ContainerState struct {
	Status     string // presumably one of created/running/paused/restarting/removing/exited/dead — confirm against daemon
	Running    bool
	Paused     bool
	Restarting bool
	OOMKilled  bool
	Dead       bool
	Pid        int
	ExitCode   int
	Error      string
	StartedAt  string
	FinishedAt string
	Health     *Health `json:",omitempty"`
}

// ContainerNode stores information about the node that a container
// is running on. It's only available in Docker Swarm.
type ContainerNode struct {
	ID        string
	IPAddress string `json:"IP"`
	Addr      string
	Name      string
	Cpus      int
	Memory    int64
	Labels    map[string]string
}

// ContainerJSONBase contains response of Engine API:
// GET "/containers/{name:.*}/json"
type ContainerJSONBase struct {
	ID              string `json:"Id"`
	Created         string
	Path            string
	Args            []string
	State           *ContainerState
	Image           string
	ResolvConfPath  string
	HostnamePath    string
	HostsPath       string
	LogPath         string
	Node            *ContainerNode `json:",omitempty"`
	Name            string
	RestartCount    int
	Driver          string
	MountLabel      string
	ProcessLabel    string
	AppArmorProfile string
	ExecIDs         []string
	HostConfig      *container.HostConfig
	GraphDriver     GraphDriverData
	SizeRw          *int64 `json:",omitempty"`
	SizeRootFs      *int64 `json:",omitempty"`
}

// ContainerJSON is newly used struct along with MountPoint.
type ContainerJSON struct {
	*ContainerJSONBase
	Mounts          []MountPoint
	Config          *container.Config
	NetworkSettings *NetworkSettings
}

// NetworkSettings exposes the network settings in the api.
type NetworkSettings struct {
	NetworkSettingsBase
	DefaultNetworkSettings
	Networks map[string]*network.EndpointSettings
}

// SummaryNetworkSettings provides a summary of container's networks
// in /containers/json.
type SummaryNetworkSettings struct {
	Networks map[string]*network.EndpointSettings
}

// NetworkSettingsBase holds basic information about networks.
type NetworkSettingsBase struct {
	Bridge                 string      // Bridge is the Bridge name the network uses(e.g. `docker0`)
	SandboxID              string      // SandboxID uniquely represents a container's network stack
	HairpinMode            bool        // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface
	LinkLocalIPv6Address   string      // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix
	LinkLocalIPv6PrefixLen int         // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address
	Ports                  nat.PortMap // Ports is a collection of PortBinding indexed by Port
	SandboxKey             string      // SandboxKey identifies the sandbox
	SecondaryIPAddresses   []network.Address
	SecondaryIPv6Addresses []network.Address
}

// DefaultNetworkSettings holds network information
// during the 2 release deprecation period.
// It will be removed in Docker 1.11.
type DefaultNetworkSettings struct {
	EndpointID          string // EndpointID uniquely represents a service endpoint in a Sandbox
	Gateway             string // Gateway holds the gateway address for the network
	GlobalIPv6Address   string // GlobalIPv6Address holds network's global IPv6 address
	GlobalIPv6PrefixLen int    // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address
	IPAddress           string // IPAddress holds the IPv4 address for the network
	IPPrefixLen         int    // IPPrefixLen represents mask length of network's IPv4 address
	IPv6Gateway         string // IPv6Gateway holds gateway address specific for IPv6
	MacAddress          string // MacAddress holds the MAC address for the network
}

// MountPoint represents a mount point configuration inside the container.
// This is used for reporting the mountpoints in use by a container.
type MountPoint struct {
	Type        mount.Type `json:",omitempty"`
	Name        string     `json:",omitempty"`
	Source      string
	Destination string
	Driver      string `json:",omitempty"`
	Mode        string
	RW          bool
	Propagation mount.Propagation
}

// NetworkResource is the body of the "get network" http response message.
type NetworkResource struct {
	Name       string                      // Name is the requested name of the network
	ID         string                      `json:"Id"` // ID uniquely identifies a network on a single machine
	Created    time.Time                   // Created is the time the network created
	Scope      string                      // Scope describes the level at which the network exists (e.g. `global` for cluster-wide or `local` for machine level)
	Driver     string                      // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`)
	EnableIPv6 bool                        // EnableIPv6 represents whether to enable IPv6
	IPAM       network.IPAM                // IPAM is the network's IP Address Management
	Internal   bool                        // Internal represents if the network is used internal only
	Attachable bool                        // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode.
	Containers map[string]EndpointResource // Containers contains endpoints belonging to the network
	Options    map[string]string           // Options holds the network specific options to use for when creating the network
	Labels     map[string]string           // Labels holds metadata specific to the network being created
	Peers      []network.PeerInfo          `json:",omitempty"` // List of peer nodes for an overlay network
}

// EndpointResource contains network resources allocated and used for a container in a network.
type EndpointResource struct {
	Name        string
	EndpointID  string
	MacAddress  string
	IPv4Address string
	IPv6Address string
}

// NetworkCreate is the expected body of the "create network" http request message.
type NetworkCreate struct {
	CheckDuplicate bool
	Driver         string
	EnableIPv6     bool
	IPAM           *network.IPAM
	Internal       bool
	Attachable     bool
	Options        map[string]string
	Labels         map[string]string
}

// NetworkCreateRequest is the request message sent to the server for network create call.
type NetworkCreateRequest struct {
	NetworkCreate
	Name string
}

// NetworkCreateResponse is the response message sent by the server for network create call.
type NetworkCreateResponse struct {
	ID      string `json:"Id"`
	Warning string
}

// NetworkConnect represents the data to be used to connect a container to the network.
type NetworkConnect struct {
	Container      string
	EndpointConfig *network.EndpointSettings `json:",omitempty"`
}

// NetworkDisconnect represents the data to be used to disconnect a container from the network.
type NetworkDisconnect struct {
	Container string
	Force     bool
}

// Checkpoint represents the details of a checkpoint.
type Checkpoint struct {
	Name string // Name is the name of the checkpoint
}

// Runtime describes an OCI runtime.
type Runtime struct {
	Path string   `json:"path"`
	Args []string `json:"runtimeArgs,omitempty"`
}

// DiskUsage contains response of Engine API:
// GET "/system/df"
type DiskUsage struct {
	LayersSize int64
	Images     []*ImageSummary
	Containers []*Container
	Volumes    []*Volume
}

// ContainersPruneReport contains the response for Engine API:
// POST "/containers/prune"
type ContainersPruneReport struct {
	ContainersDeleted []string
	SpaceReclaimed    uint64
}

// VolumesPruneReport contains the response for Engine API:
// POST "/volumes/prune"
type VolumesPruneReport struct {
	VolumesDeleted []string
	SpaceReclaimed uint64
}

// ImagesPruneReport contains the response for Engine API:
// POST "/images/prune"
type ImagesPruneReport struct {
	ImagesDeleted  []ImageDelete
	SpaceReclaimed uint64
}

// NetworksPruneReport contains the response for Engine API:
// POST "/networks/prune"
type NetworksPruneReport struct {
	NetworksDeleted []string
}

// SecretCreateResponse contains the information returned to a client
// on the creation of a new secret.
type SecretCreateResponse struct {
	// ID is the id of the created secret.
	ID string
}

// SecretListOptions holds parameters to list secrets.
type SecretListOptions struct {
	Filters filters.Args
}
@ -0,0 +1,14 @@ |
|||||||
|
## Legacy API type versions |
||||||
|
|
||||||
|
This package includes types for legacy API versions. The stable version of the API types live in `api/types/*.go`. |
||||||
|
|
||||||
|
Consider moving a type here when you need to keep backwards compatibility in the API. These legacy types are organized by the latest API version in which they appear. For instance, types in the `v1p19` package are valid for API versions below or equal to `1.19`. Types in the `v1p20` package are valid for the API version `1.20`, since the versions below that will use the legacy types in `v1p19`. |
||||||
|
|
||||||
|
### Package name conventions |
||||||
|
|
||||||
|
The package name convention is to use `v` as a prefix for the version number and `p`(patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention: |
||||||
|
|
||||||
|
1. We cannot use `.` because it's interpreted by the language, think of `v1.20.CallFunction`. |
||||||
|
2. We cannot use `_` because golint complains about it. The code would actually be valid, but it arguably looks more awkward: `v1_20.CallFunction`. |
||||||
|
|
||||||
|
For instance, if you want to modify a type that was available in the version `1.21` of the API but it will have different fields in the version `1.22`, you want to create a new package under `api/types/versions/v1p21`. |
@ -0,0 +1,62 @@ |
|||||||
|
package versions |
||||||
|
|
||||||
|
import ( |
||||||
|
"strconv" |
||||||
|
"strings" |
||||||
|
) |
||||||
|
|
||||||
|
// compare compares two version strings
// returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise.
func compare(v1, v2 string) int {
	// Split each version into its dot-separated components.
	left := strings.Split(v1, ".")
	right := strings.Split(v2, ".")

	// Walk every position up to the longer of the two versions;
	// a missing component (or a non-numeric one, since the Atoi
	// error is ignored) is treated as zero.
	limit := len(left)
	if len(right) > limit {
		limit = len(right)
	}
	for i := 0; i < limit; i++ {
		l, r := 0, 0
		if i < len(left) {
			l, _ = strconv.Atoi(left[i])
		}
		if i < len(right) {
			r, _ = strconv.Atoi(right[i])
		}
		switch {
		case l > r:
			return 1
		case r > l:
			return -1
		}
	}
	return 0
}
||||||
|
|
||||||
|
// LessThan checks if a version is less than another
|
||||||
|
func LessThan(v, other string) bool { |
||||||
|
return compare(v, other) == -1 |
||||||
|
} |
||||||
|
|
||||||
|
// LessThanOrEqualTo checks if a version is less than or equal to another
|
||||||
|
func LessThanOrEqualTo(v, other string) bool { |
||||||
|
return compare(v, other) <= 0 |
||||||
|
} |
||||||
|
|
||||||
|
// GreaterThan checks if a version is greater than another
|
||||||
|
func GreaterThan(v, other string) bool { |
||||||
|
return compare(v, other) == 1 |
||||||
|
} |
||||||
|
|
||||||
|
// GreaterThanOrEqualTo checks if a version is greater than or equal to another
|
||||||
|
func GreaterThanOrEqualTo(v, other string) bool { |
||||||
|
return compare(v, other) >= 0 |
||||||
|
} |
||||||
|
|
||||||
|
// Equal checks if a version is equal to another
|
||||||
|
func Equal(v, other string) bool { |
||||||
|
return compare(v, other) == 0 |
||||||
|
} |
@ -0,0 +1,58 @@ |
|||||||
|
package types |
||||||
|
|
||||||
|
// This file was generated by the swagger tool.
|
||||||
|
// Editing this file might prove futile when you re-run the swagger generate command
|
||||||
|
|
||||||
|
// Volume volume
// swagger:model Volume
type Volume struct {

	// Name of the volume driver used by the volume.
	// Required: true
	Driver string `json:"Driver"`

	// User-defined key/value metadata.
	// Required: true
	Labels map[string]string `json:"Labels"`

	// Mount path of the volume on the host.
	// Required: true
	Mountpoint string `json:"Mountpoint"`

	// Name of the volume.
	// Required: true
	Name string `json:"Name"`

	// The driver specific options used when creating the volume.
	// Required: true
	Options map[string]string `json:"Options"`

	// The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level.
	// Required: true
	Scope string `json:"Scope"`

	// Low-level details about the volume, provided by the volume driver.
	// Details are returned as a map with key/value pairs:
	// `{"key":"value","key2":"value2"}`.
	//
	// The `Status` field is optional, and is omitted if the volume driver
	// does not support this feature.
	//
	Status map[string]interface{} `json:"Status,omitempty"`

	// Usage details about the volume; only present when the daemon
	// reports them (see VolumeUsageData).
	UsageData *VolumeUsageData `json:"UsageData,omitempty"`
}
||||||
|
|
||||||
|
// VolumeUsageData volume usage data
// swagger:model VolumeUsageData
type VolumeUsageData struct {

	// The number of containers referencing this volume.
	// Required: true
	RefCount int64 `json:"RefCount"`

	// The disk space used by the volume (local driver only)
	// Required: true
	Size int64 `json:"Size"`
}
@ -0,0 +1,29 @@ |
|||||||
|
package volume |
||||||
|
|
||||||
|
// ----------------------------------------------------------------------------
|
||||||
|
// DO NOT EDIT THIS FILE
|
||||||
|
// This file was generated by `swagger generate operation`
|
||||||
|
//
|
||||||
|
// See hack/swagger-gen.sh
|
||||||
|
// ----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
// VolumesCreateBody volumes create body
// swagger:model VolumesCreateBody
type VolumesCreateBody struct {

	// Name of the volume driver to use.
	// Required: true
	Driver string `json:"Driver"`

	// A mapping of driver options and values. These options are passed directly to the driver and are driver specific.
	// Required: true
	DriverOpts map[string]string `json:"DriverOpts"`

	// User-defined key/value metadata.
	// Required: true
	Labels map[string]string `json:"Labels"`

	// The new volume's name. If not specified, Docker generates a name.
	// Required: true
	Name string `json:"Name"`
}
@ -0,0 +1,23 @@ |
|||||||
|
package volume |
||||||
|
|
||||||
|
// ----------------------------------------------------------------------------
|
||||||
|
// DO NOT EDIT THIS FILE
|
||||||
|
// This file was generated by `swagger generate operation`
|
||||||
|
//
|
||||||
|
// See hack/swagger-gen.sh
|
||||||
|
// ----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
import "github.com/docker/docker/api/types" |
||||||
|
|
||||||
|
// VolumesListOKBody volumes list o k body
// swagger:model VolumesListOKBody
type VolumesListOKBody struct {

	// List of volumes
	// Required: true
	Volumes []*types.Volume `json:"Volumes"`

	// Warnings that occurred when fetching the list of volumes
	// Required: true
	Warnings []string `json:"Warnings"`
}
@ -0,0 +1,13 @@ |
|||||||
|
package client |
||||||
|
|
||||||
|
import ( |
||||||
|
"github.com/docker/docker/api/types" |
||||||
|
"golang.org/x/net/context" |
||||||
|
) |
||||||
|
|
||||||
|
// CheckpointCreate creates a checkpoint from the given container with the given name
|
||||||
|
func (cli *Client) CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error { |
||||||
|
resp, err := cli.post(ctx, "/containers/"+container+"/checkpoints", nil, options, nil) |
||||||
|
ensureReaderClosed(resp) |
||||||
|
return err |
||||||
|
} |
@ -0,0 +1,20 @@ |
|||||||
|
package client |
||||||
|
|
||||||
|
import ( |
||||||
|
"net/url" |
||||||
|
|
||||||
|
"github.com/docker/docker/api/types" |
||||||
|
"golang.org/x/net/context" |
||||||
|
) |
||||||
|
|
||||||
|
// CheckpointDelete deletes the checkpoint with the given name from the given container
|
||||||
|
func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, options types.CheckpointDeleteOptions) error { |
||||||
|
query := url.Values{} |
||||||
|
if options.CheckpointDir != "" { |
||||||
|
query.Set("dir", options.CheckpointDir) |
||||||
|
} |
||||||
|
|
||||||
|
resp, err := cli.delete(ctx, "/containers/"+containerID+"/checkpoints/"+options.CheckpointID, query, nil) |
||||||
|
ensureReaderClosed(resp) |
||||||
|
return err |
||||||
|
} |
@ -0,0 +1,28 @@ |
|||||||
|
package client |
||||||
|
|
||||||
|
import ( |
||||||
|
"encoding/json" |
||||||
|
"net/url" |
||||||
|
|
||||||
|
"github.com/docker/docker/api/types" |
||||||
|
"golang.org/x/net/context" |
||||||
|
) |
||||||
|
|
||||||
|
// CheckpointList returns the checkpoints of the given container in the docker host.
func (cli *Client) CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) {
	var checkpoints []types.Checkpoint

	query := url.Values{}
	// Only forward the checkpoint directory when the caller set one.
	if options.CheckpointDir != "" {
		query.Set("dir", options.CheckpointDir)
	}

	resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil)
	if err != nil {
		return checkpoints, err
	}

	// Decode the JSON list, then release the response body so the
	// underlying connection can be reused.
	err = json.NewDecoder(resp.body).Decode(&checkpoints)
	ensureReaderClosed(resp)
	return checkpoints, err
}
@ -0,0 +1,6 @@ |
|||||||
|
// +build linux freebsd solaris openbsd darwin

package client

// DefaultDockerHost defines os specific default if DOCKER_HOST is unset
// (the standard Unix domain socket the daemon listens on).
const DefaultDockerHost = "unix:///var/run/docker.sock"
@ -0,0 +1,4 @@ |
|||||||
|
package client |
||||||
|
|
||||||
|
// DefaultDockerHost defines os specific default if DOCKER_HOST is unset
// (the named pipe the daemon listens on under Windows).
const DefaultDockerHost = "npipe:////./pipe/docker_engine"
@ -0,0 +1,37 @@ |
|||||||
|
package client |
||||||
|
|
||||||
|
import ( |
||||||
|
"net/url" |
||||||
|
|
||||||
|
"github.com/docker/docker/api/types" |
||||||
|
"golang.org/x/net/context" |
||||||
|
) |
||||||
|
|
||||||
|
// ContainerAttach attaches a connection to a container in the server.
|
||||||
|
// It returns a types.HijackedConnection with the hijacked connection
|
||||||
|
// and the a reader to get output. It's up to the called to close
|
||||||
|
// the hijacked connection by calling types.HijackedResponse.Close.
|
||||||
|
func (cli *Client) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) { |
||||||
|
query := url.Values{} |
||||||
|
if options.Stream { |
||||||
|
query.Set("stream", "1") |
||||||
|
} |
||||||
|
if options.Stdin { |
||||||
|
query.Set("stdin", "1") |
||||||
|
} |
||||||
|
if options.Stdout { |
||||||
|
query.Set("stdout", "1") |
||||||
|
} |
||||||
|
if options.Stderr { |
||||||
|
query.Set("stderr", "1") |
||||||
|
} |
||||||
|
if options.DetachKeys != "" { |
||||||
|
query.Set("detachKeys", options.DetachKeys) |
||||||
|
} |
||||||
|
if options.Logs { |
||||||
|
query.Set("logs", "1") |
||||||
|
} |
||||||
|
|
||||||
|
headers := map[string][]string{"Content-Type": {"text/plain"}} |
||||||
|
return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, headers) |
||||||
|
} |
@ -0,0 +1,53 @@ |
|||||||
|
package client |
||||||
|
|
||||||
|
import ( |
||||||
|
"encoding/json" |
||||||
|
"errors" |
||||||
|
"net/url" |
||||||
|
|
||||||
|
distreference "github.com/docker/distribution/reference" |
||||||
|
"github.com/docker/docker/api/types" |
||||||
|
"github.com/docker/docker/api/types/reference" |
||||||
|
"golang.org/x/net/context" |
||||||
|
) |
||||||
|
|
||||||
|
// ContainerCommit applies changes into a container and creates a new tagged image.
|
||||||
|
func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) { |
||||||
|
var repository, tag string |
||||||
|
if options.Reference != "" { |
||||||
|
distributionRef, err := distreference.ParseNamed(options.Reference) |
||||||
|
if err != nil { |
||||||
|
return types.IDResponse{}, err |
||||||
|
} |
||||||
|
|
||||||
|
if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical { |
||||||
|
return types.IDResponse{}, errors.New("refusing to create a tag with a digest reference") |
||||||
|
} |
||||||
|
|
||||||
|
tag = reference.GetTagFromNamedRef(distributionRef) |
||||||
|
repository = distributionRef.Name() |
||||||
|
} |
||||||
|
|
||||||
|
query := url.Values{} |
||||||
|
query.Set("container", container) |
||||||
|
query.Set("repo", repository) |
||||||
|
query.Set("tag", tag) |
||||||
|
query.Set("comment", options.Comment) |
||||||
|
query.Set("author", options.Author) |
||||||
|
for _, change := range options.Changes { |
||||||
|
query.Add("changes", change) |
||||||
|
} |
||||||
|
if options.Pause != true { |
||||||
|
query.Set("pause", "0") |
||||||
|
} |
||||||
|
|
||||||
|
var response types.IDResponse |
||||||
|
resp, err := cli.post(ctx, "/commit", query, options.Config, nil) |
||||||
|
if err != nil { |
||||||
|
return response, err |
||||||
|
} |
||||||
|
|
||||||
|
err = json.NewDecoder(resp.body).Decode(&response) |
||||||
|
ensureReaderClosed(resp) |
||||||
|
return response, err |
||||||
|
} |
@ -0,0 +1,97 @@ |
|||||||
|
package client |
||||||
|
|
||||||
|
import ( |
||||||
|
"encoding/base64" |
||||||
|
"encoding/json" |
||||||
|
"fmt" |
||||||
|
"io" |
||||||
|
"net/http" |
||||||
|
"net/url" |
||||||
|
"path/filepath" |
||||||
|
"strings" |
||||||
|
|
||||||
|
"golang.org/x/net/context" |
||||||
|
|
||||||
|
"github.com/docker/docker/api/types" |
||||||
|
) |
||||||
|
|
||||||
|
// ContainerStatPath returns Stat information about a path inside the container filesystem.
|
||||||
|
func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) { |
||||||
|
query := url.Values{} |
||||||
|
query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API.
|
||||||
|
|
||||||
|
urlStr := fmt.Sprintf("/containers/%s/archive", containerID) |
||||||
|
response, err := cli.head(ctx, urlStr, query, nil) |
||||||
|
if err != nil { |
||||||
|
return types.ContainerPathStat{}, err |
||||||
|
} |
||||||
|
defer ensureReaderClosed(response) |
||||||
|
return getContainerPathStatFromHeader(response.header) |
||||||
|
} |
||||||
|
|
||||||
|
// CopyToContainer copies content into the container filesystem.
|
||||||
|
func (cli *Client) CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error { |
||||||
|
query := url.Values{} |
||||||
|
query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API.
|
||||||
|
// Do not allow for an existing directory to be overwritten by a non-directory and vice versa.
|
||||||
|
if !options.AllowOverwriteDirWithFile { |
||||||
|
query.Set("noOverwriteDirNonDir", "true") |
||||||
|
} |
||||||
|
|
||||||
|
apiPath := fmt.Sprintf("/containers/%s/archive", container) |
||||||
|
|
||||||
|
response, err := cli.putRaw(ctx, apiPath, query, content, nil) |
||||||
|
if err != nil { |
||||||
|
return err |
||||||
|
} |
||||||
|
defer ensureReaderClosed(response) |
||||||
|
|
||||||
|
if response.statusCode != http.StatusOK { |
||||||
|
return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) |
||||||
|
} |
||||||
|
|
||||||
|
return nil |
||||||
|
} |
||||||
|
|
||||||
|
// CopyFromContainer gets the content from the container and returns it as a Reader
|
||||||
|
// to manipulate it in the host. It's up to the caller to close the reader.
|
||||||
|
func (cli *Client) CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { |
||||||
|
query := make(url.Values, 1) |
||||||
|
query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API.
|
||||||
|
|
||||||
|
apiPath := fmt.Sprintf("/containers/%s/archive", container) |
||||||
|
response, err := cli.get(ctx, apiPath, query, nil) |
||||||
|
if err != nil { |
||||||
|
return nil, types.ContainerPathStat{}, err |
||||||
|
} |
||||||
|
|
||||||
|
if response.statusCode != http.StatusOK { |
||||||
|
return nil, types.ContainerPathStat{}, fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) |
||||||
|
} |
||||||
|
|
||||||
|
// In order to get the copy behavior right, we need to know information
|
||||||
|
// about both the source and the destination. The response headers include
|
||||||
|
// stat info about the source that we can use in deciding exactly how to
|
||||||
|
// copy it locally. Along with the stat info about the local destination,
|
||||||
|
// we have everything we need to handle the multiple possibilities there
|
||||||
|
// can be when copying a file/dir from one location to another file/dir.
|
||||||
|
stat, err := getContainerPathStatFromHeader(response.header) |
||||||
|
if err != nil { |
||||||
|
return nil, stat, fmt.Errorf("unable to get resource stat from response: %s", err) |
||||||
|
} |
||||||
|
return response.body, stat, err |
||||||
|
} |
||||||
|
|
||||||
|
func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) { |
||||||
|
var stat types.ContainerPathStat |
||||||
|
|
||||||
|
encodedStat := header.Get("X-Docker-Container-Path-Stat") |
||||||
|
statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat)) |
||||||
|
|
||||||
|
err := json.NewDecoder(statDecoder).Decode(&stat) |
||||||
|
if err != nil { |
||||||
|
err = fmt.Errorf("unable to decode container path stat header: %s", err) |
||||||
|
} |
||||||
|
|
||||||
|
return stat, err |
||||||
|
} |
@ -0,0 +1,50 @@ |
|||||||
|
package client |
||||||
|
|
||||||
|
import ( |
||||||
|
"encoding/json" |
||||||
|
"net/url" |
||||||
|
"strings" |
||||||
|
|
||||||
|
"github.com/docker/docker/api/types/container" |
||||||
|
"github.com/docker/docker/api/types/network" |
||||||
|
"golang.org/x/net/context" |
||||||
|
) |
||||||
|
|
||||||
|
// configWrapper bundles the container, host, and networking configuration
// into the single JSON body expected by POST /containers/create.
type configWrapper struct {
	*container.Config
	HostConfig       *container.HostConfig
	NetworkingConfig *network.NetworkingConfig
}
||||||
|
|
||||||
|
// ContainerCreate creates a new container based in the given configuration.
// It can be associated with a name, but it's not mandatory.
func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (container.ContainerCreateCreatedBody, error) {
	var response container.ContainerCreateCreatedBody

	// The stop-timeout option requires API v1.25; reject it early
	// when talking to an older daemon.
	if err := cli.NewVersionError("1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil {
		return response, err
	}

	query := url.Values{}
	if containerName != "" {
		query.Set("name", containerName)
	}

	body := configWrapper{
		Config:           config,
		HostConfig:       hostConfig,
		NetworkingConfig: networkingConfig,
	}

	serverResp, err := cli.post(ctx, "/containers/create", query, body, nil)
	if err != nil {
		// Translate the daemon's 404 "No such image" into a typed error
		// callers can detect.
		if serverResp.statusCode == 404 && strings.Contains(err.Error(), "No such image") {
			return response, imageNotFoundError{config.Image}
		}
		return response, err
	}

	err = json.NewDecoder(serverResp.body).Decode(&response)
	ensureReaderClosed(serverResp)
	return response, err
}
@ -0,0 +1,23 @@ |
|||||||
|
package client |
||||||
|
|
||||||
|
import ( |
||||||
|
"encoding/json" |
||||||
|
"net/url" |
||||||
|
|
||||||
|
"github.com/docker/docker/api/types" |
||||||
|
"golang.org/x/net/context" |
||||||
|
) |
||||||
|
|
||||||
|
// ContainerDiff shows differences in a container filesystem since it was started.
|
||||||
|
func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]types.ContainerChange, error) { |
||||||
|
var changes []types.ContainerChange |
||||||
|
|
||||||
|
serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil) |
||||||
|
if err != nil { |
||||||
|
return changes, err |
||||||
|
} |
||||||
|
|
||||||
|
err = json.NewDecoder(serverResp.body).Decode(&changes) |
||||||
|
ensureReaderClosed(serverResp) |
||||||
|
return changes, err |
||||||
|
} |
@ -0,0 +1,54 @@ |
|||||||
|
package client |
||||||
|
|
||||||
|
import ( |
||||||
|
"encoding/json" |
||||||
|
|
||||||
|
"github.com/docker/docker/api/types" |
||||||
|
"golang.org/x/net/context" |
||||||
|
) |
||||||
|
|
||||||
|
// ContainerExecCreate creates a new exec configuration to run an exec process.
|
||||||
|
func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) { |
||||||
|
var response types.IDResponse |
||||||
|
|
||||||
|
if err := cli.NewVersionError("1.25", "env"); len(config.Env) != 0 && err != nil { |
||||||
|
return response, err |
||||||
|
} |
||||||
|
|
||||||
|
resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil) |
||||||
|
if err != nil { |
||||||
|
return response, err |
||||||
|
} |
||||||
|
err = json.NewDecoder(resp.body).Decode(&response) |
||||||
|
ensureReaderClosed(resp) |
||||||
|
return response, err |
||||||
|
} |
||||||
|
|
||||||
|
// ContainerExecStart starts an exec process already created in the docker host.
|
||||||
|
func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error { |
||||||
|
resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, config, nil) |
||||||
|
ensureReaderClosed(resp) |
||||||
|
return err |
||||||
|
} |
||||||
|
|
||||||
|
// ContainerExecAttach attaches a connection to an exec process in the server.
// It returns a types.HijackedResponse with the hijacked connection
// and a reader to get output. It's up to the caller to close
// the hijacked connection by calling types.HijackedResponse.Close.
func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error) {
	headers := map[string][]string{"Content-Type": {"application/json"}}
	return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers)
}
||||||
|
|
||||||
|
// ContainerExecInspect returns information about a specific exec process on the docker host.
|
||||||
|
func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) { |
||||||
|
var response types.ContainerExecInspect |
||||||
|
resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil) |
||||||
|
if err != nil { |
||||||
|
return response, err |
||||||
|
} |
||||||
|
|
||||||
|
err = json.NewDecoder(resp.body).Decode(&response) |
||||||
|
ensureReaderClosed(resp) |
||||||
|
return response, err |
||||||
|
} |
@ -0,0 +1,20 @@ |
|||||||
|
package client |
||||||
|
|
||||||
|
import ( |
||||||
|
"io" |
||||||
|
"net/url" |
||||||
|
|
||||||
|
"golang.org/x/net/context" |
||||||
|
) |
||||||
|
|
||||||
|
// ContainerExport retrieves the raw contents of a container
|
||||||
|
// and returns them as an io.ReadCloser. It's up to the caller
|
||||||
|
// to close the stream.
|
||||||
|
func (cli *Client) ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) { |
||||||
|
serverResp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil) |
||||||
|
if err != nil { |
||||||
|
return nil, err |
||||||
|
} |
||||||
|
|
||||||
|
return serverResp.body, nil |
||||||
|
} |
@ -0,0 +1,54 @@ |
|||||||
|
package client |
||||||
|
|
||||||
|
import ( |
||||||
|
"bytes" |
||||||
|
"encoding/json" |
||||||
|
"io/ioutil" |
||||||
|
"net/http" |
||||||
|
"net/url" |
||||||
|
|
||||||
|
"github.com/docker/docker/api/types" |
||||||
|
"golang.org/x/net/context" |
||||||
|
) |
||||||
|
|
||||||
|
// ContainerInspect returns the container information.
|
||||||
|
func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) { |
||||||
|
serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil) |
||||||
|
if err != nil { |
||||||
|
if serverResp.statusCode == http.StatusNotFound { |
||||||
|
return types.ContainerJSON{}, containerNotFoundError{containerID} |
||||||
|
} |
||||||
|
return types.ContainerJSON{}, err |
||||||
|
} |
||||||
|
|
||||||
|
var response types.ContainerJSON |
||||||
|
err = json.NewDecoder(serverResp.body).Decode(&response) |
||||||
|
ensureReaderClosed(serverResp) |
||||||
|
return response, err |
||||||
|
} |
||||||
|
|
||||||
|
// ContainerInspectWithRaw returns the container information and its raw representation.
func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) {
	query := url.Values{}
	// Ask the daemon to also compute the container's size fields.
	if getSize {
		query.Set("size", "1")
	}
	serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil)
	if err != nil {
		// Map a 404 onto the typed not-found error callers test for.
		if serverResp.statusCode == http.StatusNotFound {
			return types.ContainerJSON{}, nil, containerNotFoundError{containerID}
		}
		return types.ContainerJSON{}, nil, err
	}
	defer ensureReaderClosed(serverResp)

	// Read the full body first so we can return it verbatim alongside
	// the decoded struct.
	body, err := ioutil.ReadAll(serverResp.body)
	if err != nil {
		return types.ContainerJSON{}, nil, err
	}

	var response types.ContainerJSON
	rdr := bytes.NewReader(body)
	err = json.NewDecoder(rdr).Decode(&response)
	return response, body, err
}
@ -0,0 +1,17 @@ |
|||||||
|
package client |
||||||
|
|
||||||
|
import ( |
||||||
|
"net/url" |
||||||
|
|
||||||
|
"golang.org/x/net/context" |
||||||
|
) |
||||||
|
|
||||||
|
// ContainerKill terminates the container process but does not remove the container from the docker host.
|
||||||
|
func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error { |
||||||
|
query := url.Values{} |
||||||
|
query.Set("signal", signal) |
||||||
|
|
||||||
|
resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil) |
||||||
|
ensureReaderClosed(resp) |
||||||
|
return err |
||||||
|
} |
@ -0,0 +1,56 @@ |
|||||||
|
package client |
||||||
|
|
||||||
|
import ( |
||||||
|
"encoding/json" |
||||||
|
"net/url" |
||||||
|
"strconv" |
||||||
|
|
||||||
|
"github.com/docker/docker/api/types" |
||||||
|
"github.com/docker/docker/api/types/filters" |
||||||
|
"golang.org/x/net/context" |
||||||
|
) |
||||||
|
|
||||||
|
// ContainerList returns the list of containers in the docker host.
func (cli *Client) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) {
	query := url.Values{}

	if options.All {
		query.Set("all", "1")
	}

	// -1 is the sentinel for "no limit"; anything else is forwarded.
	if options.Limit != -1 {
		query.Set("limit", strconv.Itoa(options.Limit))
	}

	if options.Since != "" {
		query.Set("since", options.Since)
	}

	if options.Before != "" {
		query.Set("before", options.Before)
	}

	if options.Size {
		query.Set("size", "1")
	}

	if options.Filters.Len() > 0 {
		// Filters are serialized in a version-dependent JSON format.
		filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters)

		if err != nil {
			return nil, err
		}

		query.Set("filters", filterJSON)
	}

	resp, err := cli.get(ctx, "/containers/json", query, nil)
	if err != nil {
		return nil, err
	}

	var containers []types.Container
	err = json.NewDecoder(resp.body).Decode(&containers)
	ensureReaderClosed(resp)
	return containers, err
}
@ -0,0 +1,52 @@ |
|||||||
|
package client |
||||||
|
|
||||||
|
import ( |
||||||
|
"io" |
||||||
|
"net/url" |
||||||
|
"time" |
||||||
|
|
||||||
|
"golang.org/x/net/context" |
||||||
|
|
||||||
|
"github.com/docker/docker/api/types" |
||||||
|
timetypes "github.com/docker/docker/api/types/time" |
||||||
|
) |
||||||
|
|
||||||
|
// ContainerLogs returns the logs generated by a container in an io.ReadCloser.
// It's up to the caller to close the stream.
func (cli *Client) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) {
	query := url.Values{}
	if options.ShowStdout {
		query.Set("stdout", "1")
	}

	if options.ShowStderr {
		query.Set("stderr", "1")
	}

	if options.Since != "" {
		// "Since" accepts both absolute timestamps and relative
		// durations; GetTimestamp resolves it against the current time.
		ts, err := timetypes.GetTimestamp(options.Since, time.Now())
		if err != nil {
			return nil, err
		}
		query.Set("since", ts)
	}

	if options.Timestamps {
		query.Set("timestamps", "1")
	}

	if options.Details {
		query.Set("details", "1")
	}

	if options.Follow {
		query.Set("follow", "1")
	}
	// "tail" is always sent, even when empty.
	query.Set("tail", options.Tail)

	resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil)
	if err != nil {
		return nil, err
	}
	// Ownership of the body transfers to the caller.
	return resp.body, nil
}
@ -0,0 +1,10 @@ |
|||||||
|
package client |
||||||
|
|
||||||
|
import "golang.org/x/net/context" |
||||||
|
|
||||||
|
// ContainerPause pauses the main process of a given container without terminating it.
|
||||||
|
func (cli *Client) ContainerPause(ctx context.Context, containerID string) error { |
||||||
|
resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil) |
||||||
|
ensureReaderClosed(resp) |
||||||
|
return err |
||||||
|
} |
@ -0,0 +1,36 @@ |
|||||||
|
package client |
||||||
|
|
||||||
|
import ( |
||||||
|
"encoding/json" |
||||||
|
"fmt" |
||||||
|
|
||||||
|
"github.com/docker/docker/api/types" |
||||||
|
"github.com/docker/docker/api/types/filters" |
||||||
|
"golang.org/x/net/context" |
||||||
|
) |
||||||
|
|
||||||
|
// ContainersPrune requests the daemon to delete unused data
|
||||||
|
func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) { |
||||||
|
var report types.ContainersPruneReport |
||||||
|
|
||||||
|
if err := cli.NewVersionError("1.25", "container prune"); err != nil { |
||||||
|
return report, err |
||||||
|
} |
||||||
|
|
||||||
|
query, err := getFiltersQuery(pruneFilters) |
||||||
|
if err != nil { |
||||||
|
return report, err |
||||||
|
} |
||||||
|
|
||||||
|
serverResp, err := cli.post(ctx, "/containers/prune", query, nil, nil) |
||||||
|
if err != nil { |
||||||
|
return report, err |
||||||
|
} |
||||||
|
defer ensureReaderClosed(serverResp) |
||||||
|
|
||||||
|
if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { |
||||||
|
return report, fmt.Errorf("Error retrieving disk usage: %v", err) |
||||||
|
} |
||||||
|
|
||||||
|
return report, nil |
||||||
|
} |
@ -0,0 +1,27 @@ |
|||||||
|
package client |
||||||
|
|
||||||
|
import ( |
||||||
|
"net/url" |
||||||
|
|
||||||
|
"github.com/docker/docker/api/types" |
||||||
|
"golang.org/x/net/context" |
||||||
|
) |
||||||
|
|
||||||
|
// ContainerRemove kills and removes a container from the docker host.
|
||||||
|
func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options types.ContainerRemoveOptions) error { |
||||||
|
query := url.Values{} |
||||||
|
if options.RemoveVolumes { |
||||||
|
query.Set("v", "1") |
||||||
|
} |
||||||
|
if options.RemoveLinks { |
||||||
|
query.Set("link", "1") |
||||||
|
} |
||||||
|
|
||||||
|
if options.Force { |
||||||
|
query.Set("force", "1") |
||||||
|
} |
||||||
|
|
||||||
|
resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil) |
||||||
|
ensureReaderClosed(resp) |
||||||
|
return err |
||||||
|
} |
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in new issue