mirror of https://github.com/k3d-io/k3d
parent
433ff16467
commit
a34bbc3ef7
@ -0,0 +1,29 @@ |
||||
/* |
||||
Copyright © 2019 Thorsten Klein <iwilltry42@gmail.com> |
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy |
||||
of this software and associated documentation files (the "Software"), to deal |
||||
in the Software without restriction, including without limitation the rights |
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell |
||||
copies of the Software, and to permit persons to whom the Software is |
||||
furnished to do so, subject to the following conditions: |
||||
|
||||
The above copyright notice and this permission notice shall be included in |
||||
all copies or substantial portions of the Software. |
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN |
||||
THE SOFTWARE. |
||||
*/ |
||||
|
||||
package docker |
||||
|
||||
type docker struct{} |
||||
|
||||
func New() *Runtime { |
||||
|
||||
} |
@ -0,0 +1,36 @@ |
||||
/* |
||||
Copyright © 2019 Thorsten Klein <iwilltry42@gmail.com> |
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy |
||||
of this software and associated documentation files (the "Software"), to deal |
||||
in the Software without restriction, including without limitation the rights |
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell |
||||
copies of the Software, and to permit persons to whom the Software is |
||||
furnished to do so, subject to the following conditions: |
||||
|
||||
The above copyright notice and this permission notice shall be included in |
||||
all copies or substantial portions of the Software. |
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN |
||||
THE SOFTWARE. |
||||
*/ |
||||
package runtimes |
||||
|
||||
import ( |
||||
k3d "github.com/rancher/k3d/pkg/types" |
||||
) |
||||
|
||||
// Runtime defines an interface that can be implemented for various container runtime environments (docker, containerd, etc.)
type Runtime interface {
	// CreateContainer creates a container for the given node specification.
	CreateContainer(*k3d.Node) error
	// StartContainer starts a previously created container.
	// NOTE(review): the remaining methods take no arguments yet — presumably
	// a node/container reference will be added once implementations exist;
	// confirm the intended signatures.
	StartContainer() error
	// ExecContainer executes a command inside a running container.
	ExecContainer() error
	// StopContainer stops a running container.
	StopContainer() error
	// DeleteContainer removes a container.
	DeleteContainer() error
	// GetContainerLogs retrieves logs from a container.
	GetContainerLogs() error
}
@ -0,0 +1 @@ |
||||
*.exe |
@ -0,0 +1,22 @@ |
||||
The MIT License (MIT) |
||||
|
||||
Copyright (c) 2015 Microsoft |
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy |
||||
of this software and associated documentation files (the "Software"), to deal |
||||
in the Software without restriction, including without limitation the rights |
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell |
||||
copies of the Software, and to permit persons to whom the Software is |
||||
furnished to do so, subject to the following conditions: |
||||
|
||||
The above copyright notice and this permission notice shall be included in all |
||||
copies or substantial portions of the Software. |
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
||||
SOFTWARE. |
||||
|
@ -0,0 +1,22 @@ |
||||
# go-winio |
||||
|
||||
This repository contains utilities for efficiently performing Win32 IO operations in |
||||
Go. Currently, this is focused on accessing named pipes and other file handles, and |
||||
for using named pipes as a net transport. |
||||
|
||||
This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go |
||||
to reuse the thread to schedule another goroutine. This limits support to Windows Vista and |
||||
newer operating systems. This is similar to the implementation of network sockets in Go's net |
||||
package. |
||||
|
||||
Please see the LICENSE file for licensing information. |
||||
|
||||
This project has adopted the [Microsoft Open Source Code of |
||||
Conduct](https://opensource.microsoft.com/codeofconduct/). For more information |
||||
see the [Code of Conduct |
||||
FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact |
||||
[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional |
||||
questions or comments. |
||||
|
||||
Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe |
||||
for another named pipe implementation. |
@ -0,0 +1,280 @@ |
||||
// +build windows
|
||||
|
||||
package winio |
||||
|
||||
import ( |
||||
"encoding/binary" |
||||
"errors" |
||||
"fmt" |
||||
"io" |
||||
"io/ioutil" |
||||
"os" |
||||
"runtime" |
||||
"syscall" |
||||
"unicode/utf16" |
||||
) |
||||
|
||||
//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead
|
||||
//sys backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite
|
||||
|
||||
// Backup stream IDs: the possible values of win32StreamId.StreamId, describing
// what kind of data a stream produced by BackupRead contains. Numbering starts
// at 1 (BackupData) and follows the Win32 WIN32_STREAM_ID dwStreamId values.
const (
	BackupData = uint32(iota + 1)
	BackupEaData
	BackupSecurity
	BackupAlternateData
	BackupLink
	BackupPropertyData
	BackupObjectId
	BackupReparseData
	BackupSparseBlock
	BackupTxfsData
)
||||
|
||||
const (
	// StreamSparseAttributes is a stream attribute flag.
	// NOTE(review): value 8 appears to correspond to the Win32
	// STREAM_SPARSE_ATTRIBUTE flag — confirm against winbase.h.
	StreamSparseAttributes = uint32(8)
)

// Win32 access-mask bits commonly needed when opening handles for
// backup/restore operations (security descriptor access).
const (
	WRITE_DAC              = 0x40000
	WRITE_OWNER            = 0x80000
	ACCESS_SYSTEM_SECURITY = 0x1000000
)
||||
|
||||
// BackupHeader represents a backup stream of a file.
type BackupHeader struct {
	Id         uint32 // The backup stream ID (one of the Backup* constants above)
	Attributes uint32 // Stream attributes
	Size       int64  // The size of the stream in bytes
	Name       string // The name of the stream (for BackupAlternateData only).
	Offset     int64  // The offset of the stream in the file (for BackupSparseBlock only).
}
||||
|
||||
// win32StreamId mirrors the fixed-size prefix of the Win32 WIN32_STREAM_ID
// structure that precedes each stream in BackupRead/BackupWrite data
// (the variable-length UTF-16 stream name follows it).
type win32StreamId struct {
	StreamId   uint32
	Attributes uint32
	Size       uint64
	NameSize   uint32 // length of the stream name in bytes
}
||||
|
||||
// BackupStreamReader reads from a stream produced by the BackupRead Win32 API and produces a series
// of BackupHeader values.
type BackupStreamReader struct {
	r         io.Reader // underlying BackupRead-formatted stream
	bytesLeft int64     // bytes remaining in the current stream's payload
}

// NewBackupStreamReader produces a BackupStreamReader from any io.Reader.
func NewBackupStreamReader(r io.Reader) *BackupStreamReader {
	return &BackupStreamReader{r: r}
}
||||
|
||||
// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if
// it was not completely read.
func (r *BackupStreamReader) Next() (*BackupHeader, error) {
	// Skip whatever is left of the previous stream before parsing the next
	// header.
	if r.bytesLeft > 0 {
		if s, ok := r.r.(io.Seeker); ok {
			// Make sure Seek on io.SeekCurrent sometimes succeeds
			// before trying the actual seek.
			if _, err := s.Seek(0, io.SeekCurrent); err == nil {
				if _, err = s.Seek(r.bytesLeft, io.SeekCurrent); err != nil {
					return nil, err
				}
				r.bytesLeft = 0
			}
		}
		// If seeking was not possible, fall back to reading and discarding
		// the rest of the stream (Read caps itself at bytesLeft).
		if _, err := io.Copy(ioutil.Discard, r); err != nil {
			return nil, err
		}
	}
	// Parse the fixed-size WIN32_STREAM_ID prefix.
	var wsi win32StreamId
	if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil {
		return nil, err
	}
	hdr := &BackupHeader{
		Id:         wsi.StreamId,
		Attributes: wsi.Attributes,
		Size:       int64(wsi.Size),
	}
	// The stream name (UTF-16, NameSize bytes) immediately follows the header.
	if wsi.NameSize != 0 {
		name := make([]uint16, int(wsi.NameSize/2))
		if err := binary.Read(r.r, binary.LittleEndian, name); err != nil {
			return nil, err
		}
		hdr.Name = syscall.UTF16ToString(name)
	}
	// Sparse blocks carry an 8-byte file offset before the payload; it is
	// counted in wsi.Size, so subtract it from the payload size.
	if wsi.StreamId == BackupSparseBlock {
		if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil {
			return nil, err
		}
		hdr.Size -= 8
	}
	r.bytesLeft = hdr.Size
	return hdr, nil
}
||||
|
||||
// Read reads from the current backup stream.
|
||||
func (r *BackupStreamReader) Read(b []byte) (int, error) { |
||||
if r.bytesLeft == 0 { |
||||
return 0, io.EOF |
||||
} |
||||
if int64(len(b)) > r.bytesLeft { |
||||
b = b[:r.bytesLeft] |
||||
} |
||||
n, err := r.r.Read(b) |
||||
r.bytesLeft -= int64(n) |
||||
if err == io.EOF { |
||||
err = io.ErrUnexpectedEOF |
||||
} else if r.bytesLeft == 0 && err == nil { |
||||
err = io.EOF |
||||
} |
||||
return n, err |
||||
} |
||||
|
||||
// BackupStreamWriter writes a stream compatible with the BackupWrite Win32 API.
type BackupStreamWriter struct {
	w         io.Writer // destination for the BackupWrite-formatted stream
	bytesLeft int64     // payload bytes still owed for the current header
}

// NewBackupStreamWriter produces a BackupStreamWriter on top of an io.Writer.
func NewBackupStreamWriter(w io.Writer) *BackupStreamWriter {
	return &BackupStreamWriter{w: w}
}
||||
|
||||
// WriteHeader writes the next backup stream header and prepares for calls to Write().
// It fails if the previous stream's declared size has not been fully written.
func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error {
	if w.bytesLeft != 0 {
		return fmt.Errorf("missing %d bytes", w.bytesLeft)
	}
	name := utf16.Encode([]rune(hdr.Name))
	wsi := win32StreamId{
		StreamId:   hdr.Id,
		Attributes: hdr.Attributes,
		Size:       uint64(hdr.Size),
		NameSize:   uint32(len(name) * 2), // name length is in bytes, UTF-16
	}
	if hdr.Id == BackupSparseBlock {
		// Include space for the int64 block offset
		wsi.Size += 8
	}
	// Wire order matters: fixed header, then name, then (for sparse blocks)
	// the offset, then the payload via Write().
	if err := binary.Write(w.w, binary.LittleEndian, &wsi); err != nil {
		return err
	}
	if len(name) != 0 {
		if err := binary.Write(w.w, binary.LittleEndian, name); err != nil {
			return err
		}
	}
	if hdr.Id == BackupSparseBlock {
		if err := binary.Write(w.w, binary.LittleEndian, hdr.Offset); err != nil {
			return err
		}
	}
	w.bytesLeft = hdr.Size
	return nil
}
||||
|
||||
// Write writes to the current backup stream.
|
||||
func (w *BackupStreamWriter) Write(b []byte) (int, error) { |
||||
if w.bytesLeft < int64(len(b)) { |
||||
return 0, fmt.Errorf("too many bytes by %d", int64(len(b))-w.bytesLeft) |
||||
} |
||||
n, err := w.w.Write(b) |
||||
w.bytesLeft -= int64(n) |
||||
return n, err |
||||
} |
||||
|
||||
// BackupFileReader provides an io.ReadCloser interface on top of the BackupRead Win32 API.
type BackupFileReader struct {
	f               *os.File
	includeSecurity bool
	ctx             uintptr // opaque BackupRead context, owned by the Win32 API
}

// NewBackupFileReader returns a new BackupFileReader from a file handle. If includeSecurity is true,
// Read will attempt to read the security descriptor of the file.
func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader {
	r := &BackupFileReader{f, includeSecurity, 0}
	return r
}
||||
|
||||
// Read reads a backup stream from the file by calling the Win32 API BackupRead().
|
||||
func (r *BackupFileReader) Read(b []byte) (int, error) { |
||||
var bytesRead uint32 |
||||
err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx) |
||||
if err != nil { |
||||
return 0, &os.PathError{"BackupRead", r.f.Name(), err} |
||||
} |
||||
runtime.KeepAlive(r.f) |
||||
if bytesRead == 0 { |
||||
return 0, io.EOF |
||||
} |
||||
return int(bytesRead), nil |
||||
} |
||||
|
||||
// Close frees Win32 resources associated with the BackupFileReader. It does not close
// the underlying file.
func (r *BackupFileReader) Close() error {
	if r.ctx != 0 {
		// Abort the BackupRead context (abort=true). The error is
		// deliberately ignored: this is best-effort cleanup.
		backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx)
		runtime.KeepAlive(r.f)
		r.ctx = 0
	}
	return nil
}
||||
|
||||
// BackupFileWriter provides an io.WriteCloser interface on top of the BackupWrite Win32 API.
type BackupFileWriter struct {
	f               *os.File
	includeSecurity bool
	ctx             uintptr // opaque BackupWrite context, owned by the Win32 API
}

// NewBackupFileWriter returns a new BackupFileWriter from a file handle. If includeSecurity is true,
// Write() will attempt to restore the security descriptor from the stream.
func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter {
	w := &BackupFileWriter{f, includeSecurity, 0}
	return w
}
||||
|
||||
// Write restores a portion of the file using the provided backup stream.
|
||||
func (w *BackupFileWriter) Write(b []byte) (int, error) { |
||||
var bytesWritten uint32 |
||||
err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx) |
||||
if err != nil { |
||||
return 0, &os.PathError{"BackupWrite", w.f.Name(), err} |
||||
} |
||||
runtime.KeepAlive(w.f) |
||||
if int(bytesWritten) != len(b) { |
||||
return int(bytesWritten), errors.New("not all bytes could be written") |
||||
} |
||||
return len(b), nil |
||||
} |
||||
|
||||
// Close frees Win32 resources associated with the BackupFileWriter. It does not
// close the underlying file.
func (w *BackupFileWriter) Close() error {
	if w.ctx != 0 {
		// Abort the BackupWrite context (abort=true). The error is
		// deliberately ignored: this is best-effort cleanup.
		backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx)
		runtime.KeepAlive(w.f)
		w.ctx = 0
	}
	return nil
}
||||
|
||||
// OpenForBackup opens a file or directory, potentially skipping access checks if the backup
// or restore privileges have been acquired.
//
// If the file opened was a directory, it cannot be used with Readdir().
func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) {
	winPath, err := syscall.UTF16FromString(path)
	if err != nil {
		return nil, err
	}
	// FILE_FLAG_BACKUP_SEMANTICS allows opening directories and honors
	// backup/restore privileges; FILE_FLAG_OPEN_REPARSE_POINT opens the
	// reparse point itself rather than following it.
	h, err := syscall.CreateFile(&winPath[0], access, share, nil, createmode, syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, 0)
	if err != nil {
		err = &os.PathError{Op: "open", Path: path, Err: err}
		return nil, err
	}
	return os.NewFile(uintptr(h), path), nil
}
@ -0,0 +1,137 @@ |
||||
package winio |
||||
|
||||
import ( |
||||
"bytes" |
||||
"encoding/binary" |
||||
"errors" |
||||
) |
||||
|
||||
// fileFullEaInformation mirrors the fixed-size prefix of the Windows
// FILE_FULL_EA_INFORMATION structure; the EA name (NUL-terminated) and value
// follow it in the buffer.
type fileFullEaInformation struct {
	NextEntryOffset uint32 // byte offset from this entry to the next, 0 if last
	Flags           uint8
	NameLength      uint8  // length of the name, excluding the NUL terminator
	ValueLength     uint16 // length of the value in bytes
}
||||
|
||||
var (
	// fileFullEaInformationSize is the encoded size of the fixed header,
	// computed once at init.
	fileFullEaInformationSize = binary.Size(&fileFullEaInformation{})

	errInvalidEaBuffer = errors.New("invalid extended attribute buffer")
	errEaNameTooLarge  = errors.New("extended attribute name too large")
	errEaValueTooLarge = errors.New("extended attribute value too large")
)
||||
|
||||
// ExtendedAttribute represents a single Windows EA.
type ExtendedAttribute struct {
	Name  string
	Value []byte
	Flags uint8
}
||||
|
||||
// parseEa decodes the first FILE_FULL_EA_INFORMATION entry in b, returning the
// attribute, the remaining buffer nb (nil if this was the last entry), and an
// error if the buffer is malformed.
func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
	var info fileFullEaInformation
	err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info)
	if err != nil {
		err = errInvalidEaBuffer
		return
	}

	nameOffset := fileFullEaInformationSize
	nameLen := int(info.NameLength)
	// +1 skips the NUL terminator after the name.
	valueOffset := nameOffset + int(info.NameLength) + 1
	valueLen := int(info.ValueLength)
	nextOffset := int(info.NextEntryOffset)
	// Bounds check: valueLen+valueOffset > len(b) also covers an oversized
	// name, since valueOffset already includes nameLen. nextOffset < 0 guards
	// against uint32-to-int overflow on 32-bit platforms.
	if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) {
		err = errInvalidEaBuffer
		return
	}

	ea.Name = string(b[nameOffset : nameOffset+nameLen])
	// NOTE: Value aliases the input buffer rather than copying it.
	ea.Value = b[valueOffset : valueOffset+valueLen]
	ea.Flags = info.Flags
	if info.NextEntryOffset != 0 {
		nb = b[info.NextEntryOffset:]
	}
	return
}
||||
|
||||
// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION
|
||||
// buffer retrieved from BackupRead, ZwQueryEaFile, etc.
|
||||
func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) { |
||||
for len(b) != 0 { |
||||
ea, nb, err := parseEa(b) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
eas = append(eas, ea) |
||||
b = nb |
||||
} |
||||
return |
||||
} |
||||
|
||||
// writeEa appends one FILE_FULL_EA_INFORMATION entry for ea to buf.
// Entries are padded to 4-byte alignment; last suppresses the NextEntryOffset
// link for the final entry.
func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error {
	// Name and value lengths must fit the uint8/uint16 wire fields.
	if int(uint8(len(ea.Name))) != len(ea.Name) {
		return errEaNameTooLarge
	}
	if int(uint16(len(ea.Value))) != len(ea.Value) {
		return errEaValueTooLarge
	}
	// Header + name + NUL terminator + value, rounded up to a multiple of 4.
	entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value))
	withPadding := (entrySize + 3) &^ 3
	nextOffset := uint32(0)
	if !last {
		nextOffset = withPadding
	}
	info := fileFullEaInformation{
		NextEntryOffset: nextOffset,
		Flags:           ea.Flags,
		NameLength:      uint8(len(ea.Name)),
		ValueLength:     uint16(len(ea.Value)),
	}

	err := binary.Write(buf, binary.LittleEndian, &info)
	if err != nil {
		return err
	}

	_, err = buf.Write([]byte(ea.Name))
	if err != nil {
		return err
	}

	// NUL terminator after the name, as required by the wire format.
	err = buf.WriteByte(0)
	if err != nil {
		return err
	}

	_, err = buf.Write(ea.Value)
	if err != nil {
		return err
	}

	// Zero padding up to the 4-byte-aligned entry size (at most 3 bytes).
	_, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize])
	if err != nil {
		return err
	}

	return nil
}
||||
|
||||
// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION
|
||||
// buffer for use with BackupWrite, ZwSetEaFile, etc.
|
||||
func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) { |
||||
var buf bytes.Buffer |
||||
for i := range eas { |
||||
last := false |
||||
if i == len(eas)-1 { |
||||
last = true |
||||
} |
||||
|
||||
err := writeEa(&buf, &eas[i], last) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
} |
||||
return buf.Bytes(), nil |
||||
} |
@ -0,0 +1,323 @@ |
||||
// +build windows
|
||||
|
||||
package winio |
||||
|
||||
import ( |
||||
"errors" |
||||
"io" |
||||
"runtime" |
||||
"sync" |
||||
"sync/atomic" |
||||
"syscall" |
||||
"time" |
||||
) |
||||
|
||||
//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx
|
||||
//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort
|
||||
//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus
|
||||
//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes
|
||||
//sys wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult
|
||||
|
||||
// atomicBool is a boolean flag that is safe for concurrent use, backed by an
// int32 manipulated with sync/atomic (0 = false, 1 = true).
type atomicBool int32

// isSet reports whether the flag is currently true.
func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 }

// setFalse atomically clears the flag.
func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) }

// setTrue atomically sets the flag.
func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) }

// swap atomically stores v and returns the previous value of the flag.
func (b *atomicBool) swap(v bool) bool {
	var stored int32
	if v {
		stored = 1
	}
	return atomic.SwapInt32((*int32)(b), stored) == 1
}
||||
|
||||
// Flags for SetFileCompletionNotificationModes: skip posting to the completion
// port when an IO completes synchronously, and never signal the file handle's
// event object.
const (
	cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1
	cFILE_SKIP_SET_EVENT_ON_HANDLE        = 2
)

var (
	// ErrFileClosed is returned for operations on a closed win32File.
	ErrFileClosed = errors.New("file has already been closed")
	// ErrTimeout is returned when an IO operation exceeds its deadline.
	ErrTimeout = &timeoutError{}
)
||||
|
||||
// timeoutError is the error value behind ErrTimeout. It implements the
// net.Error interface (Timeout/Temporary) so callers can detect deadline
// expiry generically.
type timeoutError struct{}

func (e *timeoutError) Error() string   { return "i/o timeout" }
func (e *timeoutError) Timeout() bool   { return true }
func (e *timeoutError) Temporary() bool { return true }
||||
|
||||
// timeoutChan signals deadline expiry; it is closed when a deadline fires.
type timeoutChan chan struct{}

// ioInitOnce guards one-time creation of the process-wide IO completion port.
var ioInitOnce sync.Once
var ioCompletionPort syscall.Handle
||||
|
||||
// ioResult contains the result of an asynchronous IO operation
type ioResult struct {
	bytes uint32
	err   error
}

// ioOperation represents an outstanding asynchronous Win32 IO
type ioOperation struct {
	// o must be the first field: the kernel hands back a *Overlapped which is
	// reinterpreted as a *ioOperation by the completion processor.
	o  syscall.Overlapped
	ch chan ioResult // receives exactly one result per operation
}
||||
|
||||
// initIo lazily creates the shared IO completion port and starts the goroutine
// that dispatches completions. Called exactly once via ioInitOnce.
func initIo() {
	h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff)
	if err != nil {
		panic(err)
	}
	ioCompletionPort = h
	go ioCompletionProcessor(h)
}
||||
|
||||
// win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall.
// It takes ownership of this handle and will close it if it is garbage collected.
type win32File struct {
	handle        syscall.Handle
	wg            sync.WaitGroup // tracks in-flight IO so Close can wait for it
	wgLock        sync.RWMutex   // serializes closing against new IO starting
	closing       atomicBool
	socket        bool // true if the handle is a winsock socket
	readDeadline  deadlineHandler
	writeDeadline deadlineHandler
}

// deadlineHandler implements read/write deadlines for a win32File.
type deadlineHandler struct {
	setLock     sync.Mutex // serializes calls to set()
	channel     timeoutChan
	channelLock sync.RWMutex // guards replacement of channel
	timer       *time.Timer
	timedout    atomicBool
}
||||
|
||||
// makeWin32File makes a new win32File from an existing file handle
// by associating it with the shared completion port and enabling
// skip-on-synchronous-success notifications.
func makeWin32File(h syscall.Handle) (*win32File, error) {
	f := &win32File{handle: h}
	ioInitOnce.Do(initIo)
	_, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff)
	if err != nil {
		return nil, err
	}
	err = setFileCompletionNotificationModes(h, cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS|cFILE_SKIP_SET_EVENT_ON_HANDLE)
	if err != nil {
		return nil, err
	}
	f.readDeadline.channel = make(timeoutChan)
	f.writeDeadline.channel = make(timeoutChan)
	return f, nil
}
||||
|
||||
// MakeOpenFile wraps an existing Win32 handle in an io.ReadWriteCloser that
// performs non-blocking IO via the shared completion port.
func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) {
	// If we return the result of makeWin32File directly, it can result in an
	// interface-wrapped nil, rather than a nil interface value.
	f, err := makeWin32File(h)
	if err != nil {
		return nil, err
	}
	return f, nil
}
||||
|
||||
// closeHandle closes the resources associated with a Win32 handle
// exactly once, cancelling outstanding IO and waiting for it to drain first.
func (f *win32File) closeHandle() {
	// Take the write lock so no prepareIo (which holds the read lock) can
	// add to the WaitGroup while we flip the closing flag.
	f.wgLock.Lock()
	// Atomically set that we are closing, releasing the resources only once.
	if !f.closing.swap(true) {
		f.wgLock.Unlock()
		// cancel all IO and wait for it to complete
		cancelIoEx(f.handle, nil)
		f.wg.Wait()
		// at this point, no new IO can start
		syscall.Close(f.handle)
		f.handle = 0
	} else {
		f.wgLock.Unlock()
	}
}
||||
|
||||
// Close closes a win32File. It is safe to call multiple times and always
// returns nil.
func (f *win32File) Close() error {
	f.closeHandle()
	return nil
}
||||
|
||||
// prepareIo prepares for a new IO operation.
// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning.
func (f *win32File) prepareIo() (*ioOperation, error) {
	// Read lock pairs with closeHandle's write lock: checking closing and
	// incrementing the WaitGroup must be atomic with respect to Close.
	f.wgLock.RLock()
	if f.closing.isSet() {
		f.wgLock.RUnlock()
		return nil, ErrFileClosed
	}
	f.wg.Add(1)
	f.wgLock.RUnlock()
	c := &ioOperation{}
	c.ch = make(chan ioResult)
	return c, nil
}
||||
|
||||
// ioCompletionProcessor processes completed async IOs forever,
// forwarding each completion to the channel of its originating ioOperation.
func ioCompletionProcessor(h syscall.Handle) {
	for {
		var bytes uint32
		var key uintptr
		var op *ioOperation
		err := getQueuedCompletionStatus(h, &bytes, &key, &op, syscall.INFINITE)
		// op == nil means the dequeue itself failed (not a completed IO with
		// an error), which should be impossible with an INFINITE timeout.
		if op == nil {
			panic(err)
		}
		op.ch <- ioResult{bytes, err}
	}
}
||||
|
||||
// asyncIo processes the return value from ReadFile or WriteFile, blocking until
// the operation has actually completed.
func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) {
	// Anything other than ERROR_IO_PENDING means the operation finished
	// synchronously (success or failure) and no completion will be posted
	// (cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS is set).
	if err != syscall.ERROR_IO_PENDING {
		return int(bytes), err
	}

	// If the file is being closed, make sure this pending IO gets cancelled
	// so the completion (and our channel receive below) cannot hang.
	if f.closing.isSet() {
		cancelIoEx(f.handle, &c.o)
	}

	var timeout timeoutChan
	if d != nil {
		d.channelLock.Lock()
		timeout = d.channel
		d.channelLock.Unlock()
	}

	var r ioResult
	select {
	case r = <-c.ch:
		err = r.err
		if err == syscall.ERROR_OPERATION_ABORTED {
			if f.closing.isSet() {
				err = ErrFileClosed
			}
		} else if err != nil && f.socket {
			// err is from Win32. Query the overlapped structure to get the winsock error.
			var bytes, flags uint32
			err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags)
		}
	case <-timeout:
		// Deadline fired: cancel and then wait for the (now aborted)
		// completion so the kernel is done with c before we return.
		cancelIoEx(f.handle, &c.o)
		r = <-c.ch
		err = r.err
		if err == syscall.ERROR_OPERATION_ABORTED {
			err = ErrTimeout
		}
	}

	// runtime.KeepAlive is needed, as c is passed via native
	// code to ioCompletionProcessor, c must remain alive
	// until the channel read is complete.
	runtime.KeepAlive(c)
	return int(r.bytes), err
}
||||
|
||||
// Read reads from a file handle.
|
||||
func (f *win32File) Read(b []byte) (int, error) { |
||||
c, err := f.prepareIo() |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
defer f.wg.Done() |
||||
|
||||
if f.readDeadline.timedout.isSet() { |
||||
return 0, ErrTimeout |
||||
} |
||||
|
||||
var bytes uint32 |
||||
err = syscall.ReadFile(f.handle, b, &bytes, &c.o) |
||||
n, err := f.asyncIo(c, &f.readDeadline, bytes, err) |
||||
runtime.KeepAlive(b) |
||||
|
||||
// Handle EOF conditions.
|
||||
if err == nil && n == 0 && len(b) != 0 { |
||||
return 0, io.EOF |
||||
} else if err == syscall.ERROR_BROKEN_PIPE { |
||||
return 0, io.EOF |
||||
} else { |
||||
return n, err |
||||
} |
||||
} |
||||
|
||||
// Write writes to a file handle, honoring the write deadline and blocking
// until the asynchronous operation completes.
func (f *win32File) Write(b []byte) (int, error) {
	c, err := f.prepareIo()
	if err != nil {
		return 0, err
	}
	defer f.wg.Done()

	if f.writeDeadline.timedout.isSet() {
		return 0, ErrTimeout
	}

	var bytes uint32
	err = syscall.WriteFile(f.handle, b, &bytes, &c.o)
	n, err := f.asyncIo(c, &f.writeDeadline, bytes, err)
	// b is referenced by the kernel until the IO completes; keep it alive.
	runtime.KeepAlive(b)
	return n, err
}
||||
|
||||
// SetReadDeadline sets the deadline for future Read calls; a zero time
// disables the deadline.
func (f *win32File) SetReadDeadline(deadline time.Time) error {
	return f.readDeadline.set(deadline)
}

// SetWriteDeadline sets the deadline for future Write calls; a zero time
// disables the deadline.
func (f *win32File) SetWriteDeadline(deadline time.Time) error {
	return f.writeDeadline.set(deadline)
}

// Flush flushes the file's buffers via FlushFileBuffers.
func (f *win32File) Flush() error {
	return syscall.FlushFileBuffers(f.handle)
}

// Fd returns the underlying Win32 handle as a uintptr.
func (f *win32File) Fd() uintptr {
	return uintptr(f.handle)
}
||||
|
||||
// set installs a new deadline, resetting any previous timer/timeout state.
// A zero deadline disables the timeout; a past deadline fires immediately.
func (d *deadlineHandler) set(deadline time.Time) error {
	d.setLock.Lock()
	defer d.setLock.Unlock()

	if d.timer != nil {
		// Stop returning false means the timer already fired and closed
		// d.channel; drain the close so the state below is consistent.
		if !d.timer.Stop() {
			<-d.channel
		}
		d.timer = nil
	}
	d.timedout.setFalse()

	// If the previous deadline fired, its channel is closed; replace it with
	// a fresh one under channelLock so concurrent asyncIo calls see either
	// channel consistently.
	select {
	case <-d.channel:
		d.channelLock.Lock()
		d.channel = make(chan struct{})
		d.channelLock.Unlock()
	default:
	}

	if deadline.IsZero() {
		return nil
	}

	// timeoutIO marks the deadline as expired and wakes all waiters.
	timeoutIO := func() {
		d.timedout.setTrue()
		close(d.channel)
	}

	now := time.Now()
	duration := deadline.Sub(now)
	if deadline.After(now) {
		// Deadline is in the future, set a timer to wait
		d.timer = time.AfterFunc(duration, timeoutIO)
	} else {
		// Deadline is in the past. Cancel all pending IO now.
		timeoutIO()
	}
	return nil
}
@ -0,0 +1,61 @@ |
||||
// +build windows
|
||||
|
||||
package winio |
||||
|
||||
import ( |
||||
"os" |
||||
"runtime" |
||||
"syscall" |
||||
"unsafe" |
||||
) |
||||
|
||||
//sys getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = GetFileInformationByHandleEx
|
||||
//sys setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = SetFileInformationByHandle
|
||||
|
||||
// FILE_INFO_BY_HANDLE_CLASS values for Get/SetFileInformationByHandle(Ex).
const (
	fileBasicInfo = 0    // FileBasicInfo
	fileIDInfo    = 0x12 // FileIdInfo
)
||||
|
||||
// FileBasicInfo contains file access time and file attributes information.
// Its layout must match the Win32 FILE_BASIC_INFO structure, since it is
// passed directly to Get/SetFileInformationByHandle.
type FileBasicInfo struct {
	CreationTime, LastAccessTime, LastWriteTime, ChangeTime syscall.Filetime
	FileAttributes                                          uint32
	pad                                                     uint32 // padding
}
||||
|
||||
// GetFileBasicInfo retrieves times and attributes for a file.
func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
	bi := &FileBasicInfo{}
	if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
		return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
	}
	// Keep f alive so its finalizer cannot close the handle during the call.
	runtime.KeepAlive(f)
	return bi, nil
}
||||
|
||||
// SetFileBasicInfo sets times and attributes for a file.
func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error {
	if err := setFileInformationByHandle(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
		return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err}
	}
	// Keep f alive so its finalizer cannot close the handle during the call.
	runtime.KeepAlive(f)
	return nil
}
||||
|
||||
// FileIDInfo contains the volume serial number and file ID for a file. This pair should be
// unique on a system. Its layout must match the Win32 FILE_ID_INFO structure.
type FileIDInfo struct {
	VolumeSerialNumber uint64
	FileID             [16]byte
}
||||
|
||||
// GetFileID retrieves the unique (volume, file ID) pair for a file.
func GetFileID(f *os.File) (*FileIDInfo, error) {
	fileID := &FileIDInfo{}
	if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileIDInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil {
		return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
	}
	// Keep f alive so its finalizer cannot close the handle during the call.
	runtime.KeepAlive(f)
	return fileID, nil
}
@ -0,0 +1,9 @@ |
||||
module github.com/Microsoft/go-winio |
||||
|
||||
go 1.12 |
||||
|
||||
require ( |
||||
github.com/pkg/errors v0.8.1 |
||||
github.com/sirupsen/logrus v1.4.1 |
||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b |
||||
) |
@ -0,0 +1,16 @@ |
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= |
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= |
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= |
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= |
||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= |
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= |
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= |
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= |
||||
github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k= |
||||
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= |
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= |
||||
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= |
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= |
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= |
||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b h1:ag/x1USPSsqHud38I9BAC88qdNLDHHtQ4mlgQIZPPNA= |
||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= |
@ -0,0 +1,305 @@ |
||||
package winio |
||||
|
||||
import ( |
||||
"fmt" |
||||
"io" |
||||
"net" |
||||
"os" |
||||
"syscall" |
||||
"time" |
||||
"unsafe" |
||||
|
||||
"github.com/Microsoft/go-winio/pkg/guid" |
||||
) |
||||
|
||||
//sys bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind
|
||||
|
||||
const ( |
||||
afHvSock = 34 // AF_HYPERV
|
||||
|
||||
socketError = ^uintptr(0) |
||||
) |
||||
|
||||
// An HvsockAddr is an address for a AF_HYPERV socket.
|
||||
type HvsockAddr struct { |
||||
VMID guid.GUID |
||||
ServiceID guid.GUID |
||||
} |
||||
|
||||
type rawHvsockAddr struct { |
||||
Family uint16 |
||||
_ uint16 |
||||
VMID guid.GUID |
||||
ServiceID guid.GUID |
||||
} |
||||
|
||||
// Network returns the address's network name, "hvsock".
|
||||
func (addr *HvsockAddr) Network() string { |
||||
return "hvsock" |
||||
} |
||||
|
||||
func (addr *HvsockAddr) String() string { |
||||
return fmt.Sprintf("%s:%s", &addr.VMID, &addr.ServiceID) |
||||
} |
||||
|
||||
// VsockServiceID returns an hvsock service ID corresponding to the specified
// AF_VSOCK port.
func VsockServiceID(port uint32) guid.GUID {
	// Base service ID with a zeroed first dword; the parse error is ignored
	// because the literal is a known-valid GUID string.
	g, _ := guid.FromString("00000000-facb-11e6-bd58-64006a7986d3")
	// The vsock port number is encoded in the GUID's first dword.
	g.Data1 = port
	return g
}
||||
|
||||
func (addr *HvsockAddr) raw() rawHvsockAddr { |
||||
return rawHvsockAddr{ |
||||
Family: afHvSock, |
||||
VMID: addr.VMID, |
||||
ServiceID: addr.ServiceID, |
||||
} |
||||
} |
||||
|
||||
func (addr *HvsockAddr) fromRaw(raw *rawHvsockAddr) { |
||||
addr.VMID = raw.VMID |
||||
addr.ServiceID = raw.ServiceID |
||||
} |
||||
|
||||
// HvsockListener is a socket listener for the AF_HYPERV address family.
|
||||
type HvsockListener struct { |
||||
sock *win32File |
||||
addr HvsockAddr |
||||
} |
||||
|
||||
// HvsockConn is a connected socket of the AF_HYPERV address family.
|
||||
type HvsockConn struct { |
||||
sock *win32File |
||||
local, remote HvsockAddr |
||||
} |
||||
|
||||
// newHvSocket creates an AF_HYPERV stream socket and wraps it in a win32File
// configured for socket semantics.
func newHvSocket() (*win32File, error) {
	// SOCK_STREAM with protocol 1 on the AF_HYPERV family.
	fd, err := syscall.Socket(afHvSock, syscall.SOCK_STREAM, 1)
	if err != nil {
		return nil, os.NewSyscallError("socket", err)
	}
	f, err := makeWin32File(fd)
	if err != nil {
		// makeWin32File did not take ownership; close the fd ourselves.
		syscall.Close(fd)
		return nil, err
	}
	f.socket = true
	return f, nil
}
||||
|
||||
// ListenHvsock listens for connections on the specified hvsock address.
|
||||
func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) { |
||||
l := &HvsockListener{addr: *addr} |
||||
sock, err := newHvSocket() |
||||
if err != nil { |
||||
return nil, l.opErr("listen", err) |
||||
} |
||||
sa := addr.raw() |
||||
err = bind(sock.handle, unsafe.Pointer(&sa), int32(unsafe.Sizeof(sa))) |
||||
if err != nil { |
||||
return nil, l.opErr("listen", os.NewSyscallError("socket", err)) |
||||
} |
||||
err = syscall.Listen(sock.handle, 16) |
||||
if err != nil { |
||||
return nil, l.opErr("listen", os.NewSyscallError("listen", err)) |
||||
} |
||||
return &HvsockListener{sock: sock, addr: *addr}, nil |
||||
} |
||||
|
||||
func (l *HvsockListener) opErr(op string, err error) error { |
||||
return &net.OpError{Op: op, Net: "hvsock", Addr: &l.addr, Err: err} |
||||
} |
||||
|
||||
// Addr returns the listener's network address.
|
||||
func (l *HvsockListener) Addr() net.Addr { |
||||
return &l.addr |
||||
} |
||||
|
||||
// Accept waits for the next connection and returns it.
func (l *HvsockListener) Accept() (_ net.Conn, err error) {
	sock, err := newHvSocket()
	if err != nil {
		return nil, l.opErr("accept", err)
	}
	// sock is set to nil once ownership transfers to the returned conn; until
	// then this defer closes it on every failure path.
	defer func() {
		if sock != nil {
			sock.Close()
		}
	}()
	c, err := l.sock.prepareIo()
	if err != nil {
		return nil, l.opErr("accept", err)
	}
	defer l.sock.wg.Done()

	// AcceptEx, per documentation, requires an extra 16 bytes per address.
	const addrlen = uint32(16 + unsafe.Sizeof(rawHvsockAddr{}))
	var addrbuf [addrlen * 2]byte

	var bytes uint32
	err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0, addrlen, addrlen, &bytes, &c.o)
	// Wait for the overlapped accept to complete (no deadline on accepts).
	_, err = l.sock.asyncIo(c, nil, bytes, err)
	if err != nil {
		return nil, l.opErr("accept", os.NewSyscallError("acceptex", err))
	}
	conn := &HvsockConn{
		sock: sock,
	}
	// The local and remote raw addresses are laid out back to back in addrbuf,
	// addrlen bytes apart.
	conn.local.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[0])))
	conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen])))
	// Ownership of the socket now belongs to conn; disarm the deferred Close.
	sock = nil
	return conn, nil
}
||||
|
||||
// Close closes the listener, causing any pending Accept calls to fail.
|
||||
func (l *HvsockListener) Close() error { |
||||
return l.sock.Close() |
||||
} |
||||
|
||||
/* Need to finish ConnectEx handling |
||||
func DialHvsock(ctx context.Context, addr *HvsockAddr) (*HvsockConn, error) { |
||||
sock, err := newHvSocket() |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
defer func() { |
||||
if sock != nil { |
||||
sock.Close() |
||||
} |
||||
}() |
||||
c, err := sock.prepareIo() |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
defer sock.wg.Done() |
||||
var bytes uint32 |
||||
err = windows.ConnectEx(windows.Handle(sock.handle), sa, nil, 0, &bytes, &c.o) |
||||
_, err = sock.asyncIo(ctx, c, nil, bytes, err) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
conn := &HvsockConn{ |
||||
sock: sock, |
||||
remote: *addr, |
||||
} |
||||
sock = nil |
||||
return conn, nil |
||||
} |
||||
*/ |
||||
|
||||
func (conn *HvsockConn) opErr(op string, err error) error { |
||||
return &net.OpError{Op: op, Net: "hvsock", Source: &conn.local, Addr: &conn.remote, Err: err} |
||||
} |
||||
|
||||
func (conn *HvsockConn) Read(b []byte) (int, error) { |
||||
c, err := conn.sock.prepareIo() |
||||
if err != nil { |
||||
return 0, conn.opErr("read", err) |
||||
} |
||||
defer conn.sock.wg.Done() |
||||
buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} |
||||
var flags, bytes uint32 |
||||
err = syscall.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil) |
||||
n, err := conn.sock.asyncIo(c, &conn.sock.readDeadline, bytes, err) |
||||
if err != nil { |
||||
if _, ok := err.(syscall.Errno); ok { |
||||
err = os.NewSyscallError("wsarecv", err) |
||||
} |
||||
return 0, conn.opErr("read", err) |
||||
} else if n == 0 { |
||||
err = io.EOF |
||||
} |
||||
return n, err |
||||
} |
||||
|
||||
func (conn *HvsockConn) Write(b []byte) (int, error) { |
||||
t := 0 |
||||
for len(b) != 0 { |
||||
n, err := conn.write(b) |
||||
if err != nil { |
||||
return t + n, err |
||||
} |
||||
t += n |
||||
b = b[n:] |
||||
} |
||||
return t, nil |
||||
} |
||||
|
||||
// write performs a single overlapped WSASend of b and returns the number of
// bytes actually sent; Write loops over it for full delivery.
func (conn *HvsockConn) write(b []byte) (int, error) {
	c, err := conn.sock.prepareIo()
	if err != nil {
		return 0, conn.opErr("write", err)
	}
	defer conn.sock.wg.Done()
	// NOTE(review): &b[0] panics if b is empty. Write never passes an empty
	// slice (its loop condition is len(b) != 0); other callers must not either.
	buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))}
	var bytes uint32
	err = syscall.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil)
	// Wait for completion, honoring the write deadline.
	n, err := conn.sock.asyncIo(c, &conn.sock.writeDeadline, bytes, err)
	if err != nil {
		// Wrap raw errno values so callers see a descriptive syscall error.
		if _, ok := err.(syscall.Errno); ok {
			err = os.NewSyscallError("wsasend", err)
		}
		return 0, conn.opErr("write", err)
	}
	return n, err
}
||||
|
||||
// Close closes the socket connection, failing any pending read or write calls.
|
||||
func (conn *HvsockConn) Close() error { |
||||
return conn.sock.Close() |
||||
} |
||||
|
||||
func (conn *HvsockConn) shutdown(how int) error { |
||||
err := syscall.Shutdown(conn.sock.handle, syscall.SHUT_RD) |
||||
if err != nil { |
||||
return os.NewSyscallError("shutdown", err) |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// CloseRead shuts down the read end of the socket.
func (conn *HvsockConn) CloseRead() error {
	err := conn.shutdown(syscall.SHUT_RD)
	if err != nil {
		return conn.opErr("close", err)
	}
	return nil
}
||||
|
||||
// CloseWrite shuts down the write end of the socket, notifying the other endpoint that
// no more data will be written.
func (conn *HvsockConn) CloseWrite() error {
	err := conn.shutdown(syscall.SHUT_WR)
	if err != nil {
		return conn.opErr("close", err)
	}
	return nil
}
||||
|
||||
// LocalAddr returns the local address of the connection.
|
||||
func (conn *HvsockConn) LocalAddr() net.Addr { |
||||
return &conn.local |
||||
} |
||||
|
||||
// RemoteAddr returns the remote address of the connection.
|
||||
func (conn *HvsockConn) RemoteAddr() net.Addr { |
||||
return &conn.remote |
||||
} |
||||
|
||||
// SetDeadline implements the net.Conn SetDeadline method.
|
||||
func (conn *HvsockConn) SetDeadline(t time.Time) error { |
||||
conn.SetReadDeadline(t) |
||||
conn.SetWriteDeadline(t) |
||||
return nil |
||||
} |
||||
|
||||
// SetReadDeadline implements the net.Conn SetReadDeadline method.
|
||||
func (conn *HvsockConn) SetReadDeadline(t time.Time) error { |
||||
return conn.sock.SetReadDeadline(t) |
||||
} |
||||
|
||||
// SetWriteDeadline implements the net.Conn SetWriteDeadline method.
|
||||
func (conn *HvsockConn) SetWriteDeadline(t time.Time) error { |
||||
return conn.sock.SetWriteDeadline(t) |
||||
} |
@ -0,0 +1,510 @@ |
||||
// +build windows
|
||||
|
||||
package winio |
||||
|
||||
import ( |
||||
"context" |
||||
"errors" |
||||
"fmt" |
||||
"io" |
||||
"net" |
||||
"os" |
||||
"runtime" |
||||
"syscall" |
||||
"time" |
||||
"unsafe" |
||||
) |
||||
|
||||
//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe
|
||||
//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW
|
||||
//sys createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW
|
||||
//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo
|
||||
//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
|
||||
//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc
|
||||
//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) = ntdll.NtCreateNamedPipeFile
|
||||
//sys rtlNtStatusToDosError(status ntstatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb
|
||||
//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) = ntdll.RtlDosPathNameToNtPathName_U
|
||||
//sys rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) = ntdll.RtlDefaultNpAcl
|
||||
|
||||
type ioStatusBlock struct { |
||||
Status, Information uintptr |
||||
} |
||||
|
||||
type objectAttributes struct { |
||||
Length uintptr |
||||
RootDirectory uintptr |
||||
ObjectName *unicodeString |
||||
Attributes uintptr |
||||
SecurityDescriptor *securityDescriptor |
||||
SecurityQoS uintptr |
||||
} |
||||
|
||||
type unicodeString struct { |
||||
Length uint16 |
||||
MaximumLength uint16 |
||||
Buffer uintptr |
||||
} |
||||
|
||||
type securityDescriptor struct { |
||||
Revision byte |
||||
Sbz1 byte |
||||
Control uint16 |
||||
Owner uintptr |
||||
Group uintptr |
||||
Sacl uintptr |
||||
Dacl uintptr |
||||
} |
||||
|
||||
// ntstatus is an NT API status code; negative values indicate failure.
type ntstatus int32

// Err converts the status to a Go error: nil on success, otherwise the
// corresponding Win32 error obtained via RtlNtStatusToDosError.
func (status ntstatus) Err() error {
	if status >= 0 {
		return nil
	}
	return rtlNtStatusToDosError(status)
}
||||
|
||||
const ( |
||||
cERROR_PIPE_BUSY = syscall.Errno(231) |
||||
cERROR_NO_DATA = syscall.Errno(232) |
||||
cERROR_PIPE_CONNECTED = syscall.Errno(535) |
||||
cERROR_SEM_TIMEOUT = syscall.Errno(121) |
||||
|
||||
cSECURITY_SQOS_PRESENT = 0x100000 |
||||
cSECURITY_ANONYMOUS = 0 |
||||
|
||||
cPIPE_TYPE_MESSAGE = 4 |
||||
|
||||
cPIPE_READMODE_MESSAGE = 2 |
||||
|
||||
cFILE_OPEN = 1 |
||||
cFILE_CREATE = 2 |
||||
|
||||
cFILE_PIPE_MESSAGE_TYPE = 1 |
||||
cFILE_PIPE_REJECT_REMOTE_CLIENTS = 2 |
||||
|
||||
cSE_DACL_PRESENT = 4 |
||||
) |
||||
|
||||
var ( |
||||
// ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed.
|
||||
// This error should match net.errClosing since docker takes a dependency on its text.
|
||||
ErrPipeListenerClosed = errors.New("use of closed network connection") |
||||
|
||||
errPipeWriteClosed = errors.New("pipe has been closed for write") |
||||
) |
||||
|
||||
type win32Pipe struct { |
||||
*win32File |
||||
path string |
||||
} |
||||
|
||||
type win32MessageBytePipe struct { |
||||
win32Pipe |
||||
writeClosed bool |
||||
readEOF bool |
||||
} |
||||
|
||||
type pipeAddress string |
||||
|
||||
func (f *win32Pipe) LocalAddr() net.Addr { |
||||
return pipeAddress(f.path) |
||||
} |
||||
|
||||
func (f *win32Pipe) RemoteAddr() net.Addr { |
||||
return pipeAddress(f.path) |
||||
} |
||||
|
||||
func (f *win32Pipe) SetDeadline(t time.Time) error { |
||||
f.SetReadDeadline(t) |
||||
f.SetWriteDeadline(t) |
||||
return nil |
||||
} |
||||
|
||||
// CloseWrite closes the write side of a message pipe in byte mode.
func (f *win32MessageBytePipe) CloseWrite() error {
	if f.writeClosed {
		return errPipeWriteClosed
	}
	// Flush outstanding data before signaling end-of-stream.
	err := f.win32File.Flush()
	if err != nil {
		return err
	}
	// A zero-byte write on a message-mode pipe is delivered to the reader as a
	// zero-size message, which Read() on the peer interprets as EOF.
	_, err = f.win32File.Write(nil)
	if err != nil {
		return err
	}
	f.writeClosed = true
	return nil
}
||||
|
||||
// Write writes bytes to a message pipe in byte mode. Zero-byte writes are ignored, since
|
||||
// they are used to implement CloseWrite().
|
||||
func (f *win32MessageBytePipe) Write(b []byte) (int, error) { |
||||
if f.writeClosed { |
||||
return 0, errPipeWriteClosed |
||||
} |
||||
if len(b) == 0 { |
||||
return 0, nil |
||||
} |
||||
return f.win32File.Write(b) |
||||
} |
||||
|
||||
// Read reads bytes from a message pipe in byte mode. A read of a zero-byte message on a message
|
||||
// mode pipe will return io.EOF, as will all subsequent reads.
|
||||
func (f *win32MessageBytePipe) Read(b []byte) (int, error) { |
||||
if f.readEOF { |
||||
return 0, io.EOF |
||||
} |
||||
n, err := f.win32File.Read(b) |
||||
if err == io.EOF { |
||||
// If this was the result of a zero-byte read, then
|
||||
// it is possible that the read was due to a zero-size
|
||||
// message. Since we are simulating CloseWrite with a
|
||||
// zero-byte message, ensure that all future Read() calls
|
||||
// also return EOF.
|
||||
f.readEOF = true |
||||
} else if err == syscall.ERROR_MORE_DATA { |
||||
// ERROR_MORE_DATA indicates that the pipe's read mode is message mode
|
||||
// and the message still has more bytes. Treat this as a success, since
|
||||
// this package presents all named pipes as byte streams.
|
||||
err = nil |
||||
} |
||||
return n, err |
||||
} |
||||
|
||||
func (s pipeAddress) Network() string { |
||||
return "pipe" |
||||
} |
||||
|
||||
func (s pipeAddress) String() string { |
||||
return string(s) |
||||
} |
||||
|
||||
// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout.
func tryDialPipe(ctx context.Context, path *string) (syscall.Handle, error) {
	for {
		select {
		case <-ctx.Done():
			return syscall.Handle(0), ctx.Err()
		default:
			// Open with overlapped I/O; the SQOS flags request anonymous-level
			// impersonation.
			h, err := createFile(*path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
			if err == nil {
				return h, nil
			}
			// Only ERROR_PIPE_BUSY is retryable; any other error fails the dial.
			if err != cERROR_PIPE_BUSY {
				return h, &os.PathError{Err: err, Op: "open", Path: *path}
			}
			// Wait 10 msec and try again. This is a rather simplistic
			// view, as we always try each 10 milliseconds.
			time.Sleep(time.Millisecond * 10)
		}
	}
}
||||
|
||||
// DialPipe connects to a named pipe by path, timing out if the connection
|
||||
// takes longer than the specified duration. If timeout is nil, then we use
|
||||
// a default timeout of 2 seconds. (We do not use WaitNamedPipe.)
|
||||
func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { |
||||
var absTimeout time.Time |
||||
if timeout != nil { |
||||
absTimeout = time.Now().Add(*timeout) |
||||
} else { |
||||
absTimeout = time.Now().Add(time.Second * 2) |
||||
} |
||||
ctx, _ := context.WithDeadline(context.Background(), absTimeout) |
||||
conn, err := DialPipeContext(ctx, path) |
||||
if err == context.DeadlineExceeded { |
||||
return nil, ErrTimeout |
||||
} |
||||
return conn, err |
||||
} |
||||
|
||||
// DialPipeContext attempts to connect to a named pipe by `path` until `ctx`
|
||||
// cancellation or timeout.
|
||||
func DialPipeContext(ctx context.Context, path string) (net.Conn, error) { |
||||
var err error |
||||
var h syscall.Handle |
||||
h, err = tryDialPipe(ctx, &path) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
var flags uint32 |
||||
err = getNamedPipeInfo(h, &flags, nil, nil, nil) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
|
||||
f, err := makeWin32File(h) |
||||
if err != nil { |
||||
syscall.Close(h) |
||||
return nil, err |
||||
} |
||||
|
||||
// If the pipe is in message mode, return a message byte pipe, which
|
||||
// supports CloseWrite().
|
||||
if flags&cPIPE_TYPE_MESSAGE != 0 { |
||||
return &win32MessageBytePipe{ |
||||
win32Pipe: win32Pipe{win32File: f, path: path}, |
||||
}, nil |
||||
} |
||||
return &win32Pipe{win32File: f, path: path}, nil |
||||
} |
||||
|
||||
type acceptResponse struct { |
||||
f *win32File |
||||
err error |
||||
} |
||||
|
||||
type win32PipeListener struct { |
||||
firstHandle syscall.Handle |
||||
path string |
||||
config PipeConfig |
||||
acceptCh chan (chan acceptResponse) |
||||
closeCh chan int |
||||
doneCh chan int |
||||
} |
||||
|
||||
// makeServerPipeHandle creates one server-side named pipe instance at path via
// NtCreateNamedPipeFile. sd is an optional binary security descriptor, applied
// only when first is true (the first instance of the pipe name).
func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (syscall.Handle, error) {
	path16, err := syscall.UTF16FromString(path)
	if err != nil {
		return 0, &os.PathError{Op: "open", Path: path, Err: err}
	}

	var oa objectAttributes
	oa.Length = unsafe.Sizeof(oa)

	// Convert the DOS-style path to an NT path for the NT API call.
	var ntPath unicodeString
	if err := rtlDosPathNameToNtPathName(&path16[0], &ntPath, 0, 0).Err(); err != nil {
		return 0, &os.PathError{Op: "open", Path: path, Err: err}
	}
	defer localFree(ntPath.Buffer)
	oa.ObjectName = &ntPath

	// The security descriptor is only needed for the first pipe.
	if first {
		if sd != nil {
			// Copy sd into LocalAlloc'd memory so the kernel sees a stable
			// native buffer. NOTE(review): this `len` shadows the builtin len
			// for the rest of the scope.
			len := uint32(len(sd))
			sdb := localAlloc(0, len)
			defer localFree(sdb)
			copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd)
			oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb))
		} else {
			// Construct the default named pipe security descriptor.
			var dacl uintptr
			if err := rtlDefaultNpAcl(&dacl).Err(); err != nil {
				return 0, fmt.Errorf("getting default named pipe ACL: %s", err)
			}
			defer localFree(dacl)

			sdb := &securityDescriptor{
				Revision: 1,
				Control: cSE_DACL_PRESENT,
				Dacl: dacl,
			}
			oa.SecurityDescriptor = sdb
		}
	}

	typ := uint32(cFILE_PIPE_REJECT_REMOTE_CLIENTS)
	if c.MessageMode {
		typ |= cFILE_PIPE_MESSAGE_TYPE
	}

	disposition := uint32(cFILE_OPEN)
	access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | syscall.SYNCHRONIZE)
	if first {
		disposition = cFILE_CREATE
		// By not asking for read or write access, the named pipe file system
		// will put this pipe into an initially disconnected state, blocking
		// client connections until the next call with first == false.
		access = syscall.SYNCHRONIZE
	}

	timeout := int64(-50 * 10000) // 50ms

	var (
		h syscall.Handle
		iosb ioStatusBlock
	)
	err = ntCreateNamedPipeFile(&h, access, &oa, &iosb, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, disposition, 0, typ, 0, 0, 0xffffffff, uint32(c.InputBufferSize), uint32(c.OutputBufferSize), &timeout).Err()
	if err != nil {
		return 0, &os.PathError{Op: "open", Path: path, Err: err}
	}

	// oa references ntPath's buffer by pointer; keep it alive until the
	// syscall has returned.
	runtime.KeepAlive(ntPath)
	return h, nil
}
||||
|
||||
func (l *win32PipeListener) makeServerPipe() (*win32File, error) { |
||||
h, err := makeServerPipeHandle(l.path, nil, &l.config, false) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
f, err := makeWin32File(h) |
||||
if err != nil { |
||||
syscall.Close(h) |
||||
return nil, err |
||||
} |
||||
return f, nil |
||||
} |
||||
|
||||
// makeConnectedServerPipe creates a server pipe instance and blocks until a
// client connects to it or the listener is closed.
func (l *win32PipeListener) makeConnectedServerPipe() (*win32File, error) {
	p, err := l.makeServerPipe()
	if err != nil {
		return nil, err
	}

	// Wait for the client to connect.
	ch := make(chan error)
	go func(p *win32File) {
		ch <- connectPipe(p)
	}(p)

	select {
	case err = <-ch:
		if err != nil {
			p.Close()
			p = nil
		}
	case <-l.closeCh:
		// Abort the connect request by closing the handle.
		p.Close()
		p = nil
		// Drain the goroutine's result so it does not block forever.
		err = <-ch
		if err == nil || err == ErrFileClosed {
			err = ErrPipeListenerClosed
		}
	}
	return p, err
}
||||
|
||||
// listenerRoutine serializes accept and close requests for the listener. It
// runs until Close is signaled on closeCh or an accept fails with
// ErrPipeListenerClosed, then closes the first pipe handle and closes doneCh.
func (l *win32PipeListener) listenerRoutine() {
	closed := false
	for !closed {
		select {
		case <-l.closeCh:
			closed = true
		case responseCh := <-l.acceptCh:
			var (
				p *win32File
				err error
			)
			for {
				p, err = l.makeConnectedServerPipe()
				// If the connection was immediately closed by the client, try
				// again.
				if err != cERROR_NO_DATA {
					break
				}
			}
			responseCh <- acceptResponse{p, err}
			closed = err == ErrPipeListenerClosed
		}
	}
	syscall.Close(l.firstHandle)
	l.firstHandle = 0
	// Notify Close() and Accept() callers that the handle has been closed.
	close(l.doneCh)
}
||||
|
||||
// PipeConfig contains configuration for the pipe listener.
type PipeConfig struct {
	// SecurityDescriptor contains a Windows security descriptor in SDDL format.
	SecurityDescriptor string

	// MessageMode determines whether the pipe is in byte or message mode. In either
	// case the pipe is read in byte mode by default. The only practical difference in
	// this implementation is that CloseWrite() is only supported for message mode pipes;
	// CloseWrite() is implemented as a zero-byte write, but zero-byte writes are only
	// transferred to the reader (and returned as io.EOF in this implementation)
	// when the pipe is in message mode.
	MessageMode bool

	// InputBufferSize specifies the size of the input buffer, in bytes.
	InputBufferSize int32

	// OutputBufferSize specifies the size of the output buffer, in bytes.
	OutputBufferSize int32
}
||||
|
||||
// ListenPipe creates a listener on a Windows named pipe path, e.g. \\.\pipe\mypipe.
// The pipe must not already exist.
func ListenPipe(path string, c *PipeConfig) (net.Listener, error) {
	var (
		sd []byte
		err error
	)
	if c == nil {
		c = &PipeConfig{}
	}
	if c.SecurityDescriptor != "" {
		// Convert the SDDL string form to a binary security descriptor.
		sd, err = SddlToSecurityDescriptor(c.SecurityDescriptor)
		if err != nil {
			return nil, err
		}
	}
	// The first instance both reserves the pipe name and carries the security
	// descriptor (see makeServerPipeHandle).
	h, err := makeServerPipeHandle(path, sd, c, true)
	if err != nil {
		return nil, err
	}
	l := &win32PipeListener{
		firstHandle: h,
		path: path,
		config: *c,
		acceptCh: make(chan (chan acceptResponse)),
		closeCh: make(chan int),
		doneCh: make(chan int),
	}
	// listenerRoutine owns acceptCh/closeCh/doneCh for the listener's lifetime.
	go l.listenerRoutine()
	return l, nil
}
||||
|
||||
// connectPipe issues an overlapped ConnectNamedPipe on p and waits for a
// client to connect.
func connectPipe(p *win32File) error {
	c, err := p.prepareIo()
	if err != nil {
		return err
	}
	defer p.wg.Done()

	err = connectNamedPipe(p.handle, &c.o)
	// Wait for the overlapped connect to complete (no deadline).
	_, err = p.asyncIo(c, nil, 0, err)
	// ERROR_PIPE_CONNECTED is treated as success: a client is already attached.
	if err != nil && err != cERROR_PIPE_CONNECTED {
		return err
	}
	return nil
}
||||
|
||||
// Accept waits for and returns the next client connection, or
// ErrPipeListenerClosed once the listener has shut down.
func (l *win32PipeListener) Accept() (net.Conn, error) {
	ch := make(chan acceptResponse)
	select {
	case l.acceptCh <- ch:
		// listenerRoutine performs the accept and replies on ch.
		response := <-ch
		err := response.err
		if err != nil {
			return nil, err
		}
		// Message-mode pipes get the wrapper type that supports CloseWrite().
		if l.config.MessageMode {
			return &win32MessageBytePipe{
				win32Pipe: win32Pipe{win32File: response.f, path: l.path},
			}, nil
		}
		return &win32Pipe{win32File: response.f, path: l.path}, nil
	case <-l.doneCh:
		return nil, ErrPipeListenerClosed
	}
}
||||
|
||||
func (l *win32PipeListener) Close() error { |
||||
select { |
||||
case l.closeCh <- 1: |
||||
<-l.doneCh |
||||
case <-l.doneCh: |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
func (l *win32PipeListener) Addr() net.Addr { |
||||
return pipeAddress(l.path) |
||||
} |
@ -0,0 +1,235 @@ |
||||
// Package guid provides a GUID type. The backing structure for a GUID is
|
||||
// identical to that used by the golang.org/x/sys/windows GUID type.
|
||||
// There are two main binary encodings used for a GUID, the big-endian encoding,
|
||||
// and the Windows (mixed-endian) encoding. See here for details:
|
||||
// https://en.wikipedia.org/wiki/Universally_unique_identifier#Encoding
|
||||
package guid |
||||
|
||||
import ( |
||||
"crypto/rand" |
||||
"crypto/sha1" |
||||
"encoding" |
||||
"encoding/binary" |
||||
"fmt" |
||||
"strconv" |
||||
|
||||
"golang.org/x/sys/windows" |
||||
) |
||||
|
||||
// Variant specifies which GUID variant (or "type") of the GUID. It determines
|
||||
// how the entirety of the rest of the GUID is interpreted.
|
||||
type Variant uint8 |
||||
|
||||
// The variants specified by RFC 4122.
|
||||
const ( |
||||
// VariantUnknown specifies a GUID variant which does not conform to one of
|
||||
// the variant encodings specified in RFC 4122.
|
||||
VariantUnknown Variant = iota |
||||
VariantNCS |
||||
VariantRFC4122 |
||||
VariantMicrosoft |
||||
VariantFuture |
||||
) |
||||
|
||||
// Version specifies how the bits in the GUID were generated. For instance, a
|
||||
// version 4 GUID is randomly generated, and a version 5 is generated from the
|
||||
// hash of an input string.
|
||||
type Version uint8 |
||||
|
||||
var _ = (encoding.TextMarshaler)(GUID{}) |
||||
var _ = (encoding.TextUnmarshaler)(&GUID{}) |
||||
|
||||
// GUID represents a GUID/UUID. It has the same structure as
|
||||
// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
|
||||
// that type. It is defined as its own type so that stringification and
|
||||
// marshaling can be supported. The representation matches that used by native
|
||||
// Windows code.
|
||||
type GUID windows.GUID |
||||
|
||||
// NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122.
|
||||
func NewV4() (GUID, error) { |
||||
var b [16]byte |
||||
if _, err := rand.Read(b[:]); err != nil { |
||||
return GUID{}, err |
||||
} |
||||
|
||||
g := FromArray(b) |
||||
g.setVersion(4) // Version 4 means randomly generated.
|
||||
g.setVariant(VariantRFC4122) |
||||
|
||||
return g, nil |
||||
} |
||||
|
||||
// NewV5 returns a new version 5 (generated from a string via SHA-1 hashing)
|
||||
// GUID, as defined by RFC 4122. The RFC is unclear on the encoding of the name,
|
||||
// and the sample code treats it as a series of bytes, so we do the same here.
|
||||
//
|
||||
// Some implementations, such as those found on Windows, treat the name as a
|
||||
// big-endian UTF16 stream of bytes. If that is desired, the string can be
|
||||
// encoded as such before being passed to this function.
|
||||
func NewV5(namespace GUID, name []byte) (GUID, error) { |
||||
b := sha1.New() |
||||
namespaceBytes := namespace.ToArray() |
||||
b.Write(namespaceBytes[:]) |
||||
b.Write(name) |
||||
|
||||
a := [16]byte{} |
||||
copy(a[:], b.Sum(nil)) |
||||
|
||||
g := FromArray(a) |
||||
g.setVersion(5) // Version 5 means generated from a string.
|
||||
g.setVariant(VariantRFC4122) |
||||
|
||||
return g, nil |
||||
} |
||||
|
||||
func fromArray(b [16]byte, order binary.ByteOrder) GUID { |
||||
var g GUID |
||||
g.Data1 = order.Uint32(b[0:4]) |
||||
g.Data2 = order.Uint16(b[4:6]) |
||||
g.Data3 = order.Uint16(b[6:8]) |
||||
copy(g.Data4[:], b[8:16]) |
||||
return g |
||||
} |
||||
|
||||
func (g GUID) toArray(order binary.ByteOrder) [16]byte { |
||||
b := [16]byte{} |
||||
order.PutUint32(b[0:4], g.Data1) |
||||
order.PutUint16(b[4:6], g.Data2) |
||||
order.PutUint16(b[6:8], g.Data3) |
||||
copy(b[8:16], g.Data4[:]) |
||||
return b |
||||
} |
||||
|
||||
// FromArray constructs a GUID from a big-endian encoding array of 16 bytes.
|
||||
func FromArray(b [16]byte) GUID { |
||||
return fromArray(b, binary.BigEndian) |
||||
} |
||||
|
||||
// ToArray returns an array of 16 bytes representing the GUID in big-endian
|
||||
// encoding.
|
||||
func (g GUID) ToArray() [16]byte { |
||||
return g.toArray(binary.BigEndian) |
||||
} |
||||
|
||||
// FromWindowsArray constructs a GUID from a Windows encoding array of bytes.
|
||||
func FromWindowsArray(b [16]byte) GUID { |
||||
return fromArray(b, binary.LittleEndian) |
||||
} |
||||
|
||||
// ToWindowsArray returns an array of 16 bytes representing the GUID in Windows
// encoding, in which the three integer fields are little-endian.
func (g GUID) ToWindowsArray() [16]byte {
	return g.toArray(binary.LittleEndian)
}
||||
|
||||
func (g GUID) String() string { |
||||
return fmt.Sprintf( |
||||
"%08x-%04x-%04x-%04x-%012x", |
||||
g.Data1, |
||||
g.Data2, |
||||
g.Data3, |
||||
g.Data4[:2], |
||||
g.Data4[2:]) |
||||
} |
||||
|
||||
// FromString parses a string containing a GUID and returns the GUID. The only
|
||||
// format currently supported is the `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`
|
||||
// format.
|
||||
func FromString(s string) (GUID, error) { |
||||
if len(s) != 36 { |
||||
return GUID{}, fmt.Errorf("invalid GUID %q", s) |
||||
} |
||||
if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { |
||||
return GUID{}, fmt.Errorf("invalid GUID %q", s) |
||||
} |
||||
|
||||
var g GUID |
||||
|
||||
data1, err := strconv.ParseUint(s[0:8], 16, 32) |
||||
if err != nil { |
||||
return GUID{}, fmt.Errorf("invalid GUID %q", s) |
||||
} |
||||
g.Data1 = uint32(data1) |
||||
|
||||
data2, err := strconv.ParseUint(s[9:13], 16, 16) |
||||
if err != nil { |
||||
return GUID{}, fmt.Errorf("invalid GUID %q", s) |
||||
} |
||||
g.Data2 = uint16(data2) |
||||
|
||||
data3, err := strconv.ParseUint(s[14:18], 16, 16) |
||||
if err != nil { |
||||
return GUID{}, fmt.Errorf("invalid GUID %q", s) |
||||
} |
||||
g.Data3 = uint16(data3) |
||||
|
||||
for i, x := range []int{19, 21, 24, 26, 28, 30, 32, 34} { |
||||
v, err := strconv.ParseUint(s[x:x+2], 16, 8) |
||||
if err != nil { |
||||
return GUID{}, fmt.Errorf("invalid GUID %q", s) |
||||
} |
||||
g.Data4[i] = uint8(v) |
||||
} |
||||
|
||||
return g, nil |
||||
} |
||||
|
||||
func (g *GUID) setVariant(v Variant) { |
||||
d := g.Data4[0] |
||||
switch v { |
||||
case VariantNCS: |
||||
d = (d & 0x7f) |
||||
case VariantRFC4122: |
||||
d = (d & 0x3f) | 0x80 |
||||
case VariantMicrosoft: |
||||
d = (d & 0x1f) | 0xc0 |
||||
case VariantFuture: |
||||
d = (d & 0x0f) | 0xe0 |
||||
case VariantUnknown: |
||||
fallthrough |
||||
default: |
||||
panic(fmt.Sprintf("invalid variant: %d", v)) |
||||
} |
||||
g.Data4[0] = d |
||||
} |
||||
|
||||
// Variant returns the GUID variant, as defined in RFC 4122.
|
||||
func (g GUID) Variant() Variant { |
||||
b := g.Data4[0] |
||||
if b&0x80 == 0 { |
||||
return VariantNCS |
||||
} else if b&0xc0 == 0x80 { |
||||
return VariantRFC4122 |
||||
} else if b&0xe0 == 0xc0 { |
||||
return VariantMicrosoft |
||||
} else if b&0xe0 == 0xe0 { |
||||
return VariantFuture |
||||
} |
||||
return VariantUnknown |
||||
} |
||||
|
||||
func (g *GUID) setVersion(v Version) { |
||||
g.Data3 = (g.Data3 & 0x0fff) | (uint16(v) << 12) |
||||
} |
||||
|
||||
// Version returns the GUID version, as defined in RFC 4122.
|
||||
func (g GUID) Version() Version { |
||||
return Version((g.Data3 & 0xF000) >> 12) |
||||
} |
||||
|
||||
// MarshalText returns the textual representation of the GUID (the String
// form). It implements encoding.TextMarshaler and never fails.
func (g GUID) MarshalText() ([]byte, error) {
	return []byte(g.String()), nil
}
||||
|
||||
// UnmarshalText takes the textual representation of a GUID, and unmarhals it
|
||||
// into this GUID.
|
||||
func (g *GUID) UnmarshalText(text []byte) error { |
||||
g2, err := FromString(string(text)) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
*g = g2 |
||||
return nil |
||||
} |
@ -0,0 +1,202 @@ |
||||
// +build windows
|
||||
|
||||
package winio |
||||
|
||||
import ( |
||||
"bytes" |
||||
"encoding/binary" |
||||
"fmt" |
||||
"runtime" |
||||
"sync" |
||||
"syscall" |
||||
"unicode/utf16" |
||||
|
||||
"golang.org/x/sys/windows" |
||||
) |
||||
|
||||
//sys adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges
|
||||
//sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf
|
||||
//sys revertToSelf() (err error) = advapi32.RevertToSelf
|
||||
//sys openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken
|
||||
//sys getCurrentThread() (h syscall.Handle) = GetCurrentThread
|
||||
//sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW
|
||||
//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW
|
||||
//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW
|
||||
|
||||
const (
	// SE_PRIVILEGE_ENABLED is the TOKEN_PRIVILEGES attribute value passed to
	// AdjustTokenPrivileges to enable a privilege.
	SE_PRIVILEGE_ENABLED = 2

	// ERROR_NOT_ALL_ASSIGNED is set by AdjustTokenPrivileges when not every
	// requested privilege could be assigned.
	ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300

	// Well-known privilege names used for backup/restore-style access.
	SeBackupPrivilege  = "SeBackupPrivilege"
	SeRestorePrivilege = "SeRestorePrivilege"
)

// Impersonation levels passed to impersonateSelf; the names and ordering
// mirror the Windows SECURITY_IMPERSONATION_LEVEL enumeration.
const (
	securityAnonymous = iota
	securityIdentification
	securityImpersonation
	securityDelegation
)

var (
	// privNames caches privilege-name -> LUID lookups; all access is guarded
	// by privNameMutex (see mapPrivileges).
	privNames     = make(map[string]uint64)
	privNameMutex sync.Mutex
)
||||
|
||||
// PrivilegeError represents an error enabling privileges.
type PrivilegeError struct {
	// privileges holds the LUIDs of the privileges that could not be enabled.
	privileges []uint64
}
||||
|
||||
func (e *PrivilegeError) Error() string { |
||||
s := "" |
||||
if len(e.privileges) > 1 { |
||||
s = "Could not enable privileges " |
||||
} else { |
||||
s = "Could not enable privilege " |
||||
} |
||||
for i, p := range e.privileges { |
||||
if i != 0 { |
||||
s += ", " |
||||
} |
||||
s += `"` |
||||
s += getPrivilegeName(p) |
||||
s += `"` |
||||
} |
||||
return s |
||||
} |
||||
|
||||
// RunWithPrivilege enables a single privilege for a function call.
|
||||
func RunWithPrivilege(name string, fn func() error) error { |
||||
return RunWithPrivileges([]string{name}, fn) |
||||
} |
||||
|
||||
// RunWithPrivileges enables privileges for a function call.
//
// The goroutine is pinned to its OS thread for the duration because the
// enabled privileges live on a per-thread impersonation token (see
// newThreadToken); if the goroutine migrated threads the token would not
// follow it. The token is released (impersonation reverted) before returning.
func RunWithPrivileges(names []string, fn func() error) error {
	privileges, err := mapPrivileges(names)
	if err != nil {
		return err
	}
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	token, err := newThreadToken()
	if err != nil {
		return err
	}
	defer releaseThreadToken(token)
	err = adjustPrivileges(token, privileges, SE_PRIVILEGE_ENABLED)
	if err != nil {
		return err
	}
	return fn()
}
||||
|
||||
func mapPrivileges(names []string) ([]uint64, error) { |
||||
var privileges []uint64 |
||||
privNameMutex.Lock() |
||||
defer privNameMutex.Unlock() |
||||
for _, name := range names { |
||||
p, ok := privNames[name] |
||||
if !ok { |
||||
err := lookupPrivilegeValue("", name, &p) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
privNames[name] = p |
||||
} |
||||
privileges = append(privileges, p) |
||||
} |
||||
return privileges, nil |
||||
} |
||||
|
||||
// EnableProcessPrivileges enables privileges globally for the process, as
// opposed to the per-thread scope of RunWithPrivileges.
func EnableProcessPrivileges(names []string) error {
	return enableDisableProcessPrivilege(names, SE_PRIVILEGE_ENABLED)
}
||||
|
||||
// DisableProcessPrivileges disables privileges globally for the process by
// adjusting them with a zero (disabled) attribute value.
func DisableProcessPrivileges(names []string) error {
	return enableDisableProcessPrivilege(names, 0)
}
||||
|
||||
// enableDisableProcessPrivilege adjusts the named privileges on the current
// process token. action is SE_PRIVILEGE_ENABLED to enable or 0 to disable.
func enableDisableProcessPrivilege(names []string, action uint32) error {
	privileges, err := mapPrivileges(names)
	if err != nil {
		return err
	}

	// NOTE(review): the error from GetCurrentProcess is deliberately ignored;
	// it returns a process pseudo-handle and is not expected to fail.
	p, _ := windows.GetCurrentProcess()
	var token windows.Token
	err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token)
	if err != nil {
		return err
	}

	defer token.Close()
	return adjustPrivileges(token, privileges, action)
}
||||
|
||||
// adjustPrivileges applies action to the given privilege LUIDs on token by
// serializing a TOKEN_PRIVILEGES structure and calling AdjustTokenPrivileges.
func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error {
	// Wire layout: a uint32 privilege count followed by one entry per
	// privilege, each an 8-byte LUID and a 4-byte attributes field.
	var b bytes.Buffer
	binary.Write(&b, binary.LittleEndian, uint32(len(privileges)))
	for _, p := range privileges {
		binary.Write(&b, binary.LittleEndian, p)
		binary.Write(&b, binary.LittleEndian, action)
	}
	prevState := make([]byte, b.Len())
	reqSize := uint32(0)
	success, err := adjustTokenPrivileges(token, false, &b.Bytes()[0], uint32(len(prevState)), &prevState[0], &reqSize)
	if !success {
		return err
	}
	// The call can succeed while still reporting through the last-error value
	// that not every privilege was assigned; surface that as a typed error.
	if err == ERROR_NOT_ALL_ASSIGNED {
		return &PrivilegeError{privileges}
	}
	return nil
}
||||
|
||||
// getPrivilegeName returns the display name for a privilege LUID, falling
// back to the internal privilege name and then to a numeric placeholder when
// the lookups fail. Used to render PrivilegeError messages.
func getPrivilegeName(luid uint64) string {
	// Resolve the LUID to its internal name (e.g. "SeBackupPrivilege").
	var nameBuffer [256]uint16
	bufSize := uint32(len(nameBuffer))
	err := lookupPrivilegeName("", &luid, &nameBuffer[0], &bufSize)
	if err != nil {
		return fmt.Sprintf("<unknown privilege %d>", luid)
	}

	// Resolve the internal name to its localized display name.
	var displayNameBuffer [256]uint16
	displayBufSize := uint32(len(displayNameBuffer))
	var langID uint32
	err = lookupPrivilegeDisplayName("", &nameBuffer[0], &displayNameBuffer[0], &displayBufSize, &langID)
	if err != nil {
		return fmt.Sprintf("<unknown privilege %s>", string(utf16.Decode(nameBuffer[:bufSize])))
	}

	return string(utf16.Decode(displayNameBuffer[:displayBufSize]))
}
||||
|
||||
// newThreadToken begins self-impersonation on the current thread and opens
// the resulting thread token with privilege-adjustment access. The caller
// must later undo the impersonation via releaseThreadToken. On failure the
// impersonation is reverted before returning.
func newThreadToken() (windows.Token, error) {
	err := impersonateSelf(securityImpersonation)
	if err != nil {
		return 0, err
	}

	var token windows.Token
	err = openThreadToken(getCurrentThread(), syscall.TOKEN_ADJUST_PRIVILEGES|syscall.TOKEN_QUERY, false, &token)
	if err != nil {
		// Failing to revert would leave the thread in an unexpected security
		// context, so treat that as unrecoverable.
		rerr := revertToSelf()
		if rerr != nil {
			panic(rerr)
		}
		return 0, err
	}
	return token, nil
}
||||
|
||||
// releaseThreadToken ends the impersonation started by newThreadToken and
// closes the token handle. A failure to revert leaves the thread in an
// unexpected security context, so it is treated as unrecoverable.
func releaseThreadToken(h windows.Token) {
	err := revertToSelf()
	if err != nil {
		panic(err)
	}
	h.Close()
}
@ -0,0 +1,128 @@ |
||||
package winio |
||||
|
||||
import ( |
||||
"bytes" |
||||
"encoding/binary" |
||||
"fmt" |
||||
"strings" |
||||
"unicode/utf16" |
||||
"unsafe" |
||||
) |
||||
|
||||
// Reparse point tags for the two point kinds this package understands
// (the winnt.h IO_REPARSE_TAG_MOUNT_POINT / IO_REPARSE_TAG_SYMLINK values).
const (
	reparseTagMountPoint = 0xA0000003
	reparseTagSymlink    = 0xA000000C
)
||||
|
||||
// reparseDataBuffer mirrors the fixed-size prefix of the Win32
// REPARSE_DATA_BUFFER structure used for symlinks and mount points: the
// common header (tag, data length, reserved) followed by the substitute- and
// print-name offset/length fields. The variable-length path buffer (and, for
// symlinks, a flags word) follows it in serialized form.
type reparseDataBuffer struct {
	ReparseTag           uint32
	ReparseDataLength    uint16
	Reserved             uint16
	SubstituteNameOffset uint16
	SubstituteNameLength uint16
	PrintNameOffset      uint16
	PrintNameLength      uint16
}
||||
|
||||
// ReparsePoint describes a Win32 symlink or mount point.
type ReparsePoint struct {
	// Target is the path the reparse point refers to.
	Target string
	// IsMountPoint is true for a mount point (junction); false for a symlink.
	IsMountPoint bool
}
||||
|
||||
// UnsupportedReparsePointError is returned when trying to decode a non-symlink or
// mount point reparse point.
type UnsupportedReparsePointError struct {
	// Tag is the unrecognized reparse tag that was encountered.
	Tag uint32
}

// Error implements the error interface, reporting the offending tag in hex.
func (e *UnsupportedReparsePointError) Error() string {
	return fmt.Sprintf("unsupported reparse point %x", e.Tag)
}
||||
|
||||
// DecodeReparsePoint decodes a Win32 REPARSE_DATA_BUFFER structure containing either a symlink
|
||||
// or a mount point.
|
||||
func DecodeReparsePoint(b []byte) (*ReparsePoint, error) { |
||||
tag := binary.LittleEndian.Uint32(b[0:4]) |
||||
return DecodeReparsePointData(tag, b[8:]) |
||||
} |
||||
|
||||
// DecodeReparsePointData decodes the payload that follows the 8-byte
// REPARSE_DATA_BUFFER header, given its reparse tag. Only mount-point and
// symlink tags are supported; any other tag yields
// UnsupportedReparsePointError.
func DecodeReparsePointData(tag uint32, b []byte) (*ReparsePoint, error) {
	isMountPoint := false
	switch tag {
	case reparseTagMountPoint:
		isMountPoint = true
	case reparseTagSymlink:
	default:
		return nil, &UnsupportedReparsePointError{tag}
	}
	// b[4:6] is the substitute-name offset, relative to the path buffer that
	// begins 8 bytes in; symlink payloads carry an extra 4-byte flags word
	// before the paths (see EncodeReparsePoint for the mirror-image layout).
	nameOffset := 8 + binary.LittleEndian.Uint16(b[4:6])
	if !isMountPoint {
		nameOffset += 4
	}
	// b[6:8] is the substitute-name length in bytes (UTF-16, so /2 code units).
	nameLength := binary.LittleEndian.Uint16(b[6:8])
	name := make([]uint16, nameLength/2)
	err := binary.Read(bytes.NewReader(b[nameOffset:nameOffset+nameLength]), binary.LittleEndian, &name)
	if err != nil {
		return nil, err
	}
	return &ReparsePoint{string(utf16.Decode(name)), isMountPoint}, nil
}
||||
|
||||
// isDriveLetter reports whether c is an ASCII letter, i.e. a valid drive
// designator for a `C:`-style path.
func isDriveLetter(c byte) bool {
	switch {
	case 'a' <= c && c <= 'z', 'A' <= c && c <= 'Z':
		return true
	default:
		return false
	}
}
||||
|
||||
// EncodeReparsePoint encodes a Win32 REPARSE_DATA_BUFFER structure describing a symlink or
// mount point. It emits both the NT-namespace ("substitute") path and the
// original ("print") path, mirroring the layout DecodeReparsePointData reads.
func EncodeReparsePoint(rp *ReparsePoint) []byte {
	// Generate an NT path and determine if this is a relative path.
	var ntTarget string
	relative := false
	if strings.HasPrefix(rp.Target, `\\?\`) {
		ntTarget = `\??\` + rp.Target[4:]
	} else if strings.HasPrefix(rp.Target, `\\`) {
		ntTarget = `\??\UNC\` + rp.Target[2:]
	} else if len(rp.Target) >= 2 && isDriveLetter(rp.Target[0]) && rp.Target[1] == ':' {
		ntTarget = `\??\` + rp.Target
	} else {
		ntTarget = rp.Target
		relative = true
	}

	// The paths must be NUL-terminated even though they are counted strings.
	target16 := utf16.Encode([]rune(rp.Target + "\x00"))
	ntTarget16 := utf16.Encode([]rune(ntTarget + "\x00"))

	// Payload size: the fixed fields after the 8-byte header, plus both
	// UTF-16 path buffers (2 bytes per code unit).
	size := int(unsafe.Sizeof(reparseDataBuffer{})) - 8
	size += len(ntTarget16)*2 + len(target16)*2

	tag := uint32(reparseTagMountPoint)
	if !rp.IsMountPoint {
		tag = reparseTagSymlink
		size += 4 // Add room for symlink flags
	}

	// Name lengths exclude the NUL terminators; the print name immediately
	// follows the substitute (NT) name in the path buffer.
	data := reparseDataBuffer{
		ReparseTag:           tag,
		ReparseDataLength:    uint16(size),
		SubstituteNameOffset: 0,
		SubstituteNameLength: uint16((len(ntTarget16) - 1) * 2),
		PrintNameOffset:      uint16(len(ntTarget16) * 2),
		PrintNameLength:      uint16((len(target16) - 1) * 2),
	}

	var b bytes.Buffer
	binary.Write(&b, binary.LittleEndian, &data)
	if !rp.IsMountPoint {
		// Bit 0 marks a relative symlink target (SYMLINK_FLAG_RELATIVE).
		flags := uint32(0)
		if relative {
			flags |= 1
		}
		binary.Write(&b, binary.LittleEndian, flags)
	}

	binary.Write(&b, binary.LittleEndian, ntTarget16)
	binary.Write(&b, binary.LittleEndian, target16)
	return b.Bytes()
}
@ -0,0 +1,98 @@ |
||||
// +build windows
|
||||
|
||||
package winio |
||||
|
||||
import ( |
||||
"syscall" |
||||
"unsafe" |
||||
) |
||||
|
||||
//sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW
|
||||
//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW
|
||||
//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW
|
||||
//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW
|
||||
//sys localFree(mem uintptr) = LocalFree
|
||||
//sys getSecurityDescriptorLength(sd uintptr) (len uint32) = advapi32.GetSecurityDescriptorLength
|
||||
|
||||
const (
	// cERROR_NONE_MAPPED: no mapping between account names and SIDs was done
	// (Win32 ERROR_NONE_MAPPED, 1332).
	cERROR_NONE_MAPPED = syscall.Errno(1332)
)
||||
|
||||
// AccountLookupError reports a failure to resolve a Windows account name to
// a SID (see LookupSidByName).
type AccountLookupError struct {
	// Name is the account name that failed to resolve.
	Name string
	// Err is the underlying cause.
	Err error
}

// Error renders the failure, mapping the "no mapping" errno to a friendlier
// "not found" message.
func (e *AccountLookupError) Error() string {
	if e.Name == "" {
		return "lookup account: empty account name specified"
	}
	var s string
	switch e.Err {
	case cERROR_NONE_MAPPED:
		s = "not found"
	default:
		s = e.Err.Error()
	}
	return "lookup account " + e.Name + ": " + s
}
||||
|
||||
// SddlConversionError reports a failure converting an SDDL string to a
// binary security descriptor (see SddlToSecurityDescriptor).
type SddlConversionError struct {
	// Sddl is the SDDL string that failed to convert.
	Sddl string
	// Err is the underlying cause.
	Err error
}

// Error implements the error interface.
func (e *SddlConversionError) Error() string {
	return "convert " + e.Sddl + ": " + e.Err.Error()
}
||||
|
||||
// LookupSidByName looks up the SID of an account by name, returning it in
// its "S-1-..." string form.
func LookupSidByName(name string) (sid string, err error) {
	if name == "" {
		return "", &AccountLookupError{name, cERROR_NONE_MAPPED}
	}

	// First call with nil buffers to learn the required SID and referenced-
	// domain buffer sizes; this is expected to fail with
	// ERROR_INSUFFICIENT_BUFFER.
	var sidSize, sidNameUse, refDomainSize uint32
	err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse)
	if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER {
		return "", &AccountLookupError{name, err}
	}
	sidBuffer := make([]byte, sidSize)
	refDomainBuffer := make([]uint16, refDomainSize)
	err = lookupAccountName(nil, name, &sidBuffer[0], &sidSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse)
	if err != nil {
		return "", &AccountLookupError{name, err}
	}
	// Convert the binary SID to string form; the returned buffer is
	// system-allocated and must be released with LocalFree.
	var strBuffer *uint16
	err = convertSidToStringSid(&sidBuffer[0], &strBuffer)
	if err != nil {
		return "", &AccountLookupError{name, err}
	}
	sid = syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:])
	localFree(uintptr(unsafe.Pointer(strBuffer)))
	return sid, nil
}
||||
|
||||
// SddlToSecurityDescriptor converts an SDDL string to a self-contained binary
// security descriptor, copied into Go-managed memory.
func SddlToSecurityDescriptor(sddl string) ([]byte, error) {
	// The revision argument 1 selects SDDL_REVISION_1.
	var sdBuffer uintptr
	err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil)
	if err != nil {
		return nil, &SddlConversionError{sddl, err}
	}
	// The descriptor is system-allocated; copy it out, then free it.
	defer localFree(sdBuffer)
	sd := make([]byte, getSecurityDescriptorLength(sdBuffer))
	copy(sd, (*[0xffff]byte)(unsafe.Pointer(sdBuffer))[:len(sd)])
	return sd, nil
}
||||
|
||||
// SecurityDescriptorToSddl converts a binary security descriptor to its SDDL
// string form. The 0xff security-information mask requests every descriptor
// component the API will render.
func SecurityDescriptorToSddl(sd []byte) (string, error) {
	var sddl *uint16
	// The returned string length appears to include an arbitrary number of
	// terminating NULs, so it is ignored; UTF16ToString stops at the first
	// NUL instead.
	err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil)
	if err != nil {
		return "", err
	}
	// The string is system-allocated and must be released with LocalFree.
	defer localFree(uintptr(unsafe.Pointer(sddl)))
	return syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(sddl))[:]), nil
}
@ -0,0 +1,3 @@ |
||||
package winio |
||||
|
||||
//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go hvsock.go
|
@ -0,0 +1,562 @@ |
||||
// Code generated by 'go generate'; DO NOT EDIT.
|
||||
|
||||
package winio |
||||
|
||||
import ( |
||||
"syscall" |
||||
"unsafe" |
||||
|
||||
"golang.org/x/sys/windows" |
||||
) |
||||
|
||||
var _ unsafe.Pointer |
||||
|
||||
// Do the interface allocations only once for common
|
||||
// Errno values.
|
||||
const ( |
||||
errnoERROR_IO_PENDING = 997 |
||||
) |
||||
|
||||
var ( |
||||
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) |
||||
) |
||||
|
||||
// errnoErr returns common boxed Errno values, to prevent
|
||||
// allocations at runtime.
|
||||
func errnoErr(e syscall.Errno) error { |
||||
switch e { |
||||
case 0: |
||||
return nil |
||||
case errnoERROR_IO_PENDING: |
||||
return errERROR_IO_PENDING |
||||
} |
||||
// TODO: add more here, after collecting data on the common
|
||||
// error values see on Windows. (perhaps when running
|
||||
// all.bat?)
|
||||
return e |
||||
} |
||||
|
||||
var ( |
||||
modkernel32 = windows.NewLazySystemDLL("kernel32.dll") |
||||
modws2_32 = windows.NewLazySystemDLL("ws2_32.dll") |
||||
modntdll = windows.NewLazySystemDLL("ntdll.dll") |
||||
modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") |
||||
|
||||
procCancelIoEx = modkernel32.NewProc("CancelIoEx") |
||||
procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") |
||||
procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") |
||||
procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") |
||||
procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") |
||||
procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") |
||||
procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") |
||||
procCreateFileW = modkernel32.NewProc("CreateFileW") |
||||
procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") |
||||
procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") |
||||
procLocalAlloc = modkernel32.NewProc("LocalAlloc") |
||||
procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") |
||||
procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") |
||||
procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U") |
||||
procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl") |
||||
procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") |
||||
procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") |
||||
procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") |
||||
procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") |
||||
procLocalFree = modkernel32.NewProc("LocalFree") |
||||
procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength") |
||||
procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") |
||||
procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle") |
||||
procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") |
||||
procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") |
||||
procRevertToSelf = modadvapi32.NewProc("RevertToSelf") |
||||
procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") |
||||
procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") |
||||
procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") |
||||
procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") |
||||
procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") |
||||
procBackupRead = modkernel32.NewProc("BackupRead") |
||||
procBackupWrite = modkernel32.NewProc("BackupWrite") |
||||
procbind = modws2_32.NewProc("bind") |
||||
) |
||||
|
||||
func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) { |
||||
r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0) |
||||
if r1 == 0 { |
||||
if e1 != 0 { |
||||
err = errnoErr(e1) |
||||
} else { |
||||
err = syscall.EINVAL |
||||
} |
||||
} |
||||
return |
||||
} |
||||
|
||||
func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) { |
||||
r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0) |
||||
newport = syscall.Handle(r0) |
||||
if newport == 0 { |
||||
if e1 != 0 { |
||||
err = errnoErr(e1) |
||||
} else { |
||||
err = syscall.EINVAL |
||||
} |
||||
} |
||||
return |
||||
} |
||||
|
||||
func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) { |
||||
r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0) |
||||
if r1 == 0 { |
||||
if e1 != 0 { |
||||
err = errnoErr(e1) |
||||
} else { |
||||
err = syscall.EINVAL |
||||
} |
||||
} |
||||
return |
||||
} |
||||
|
||||
func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) { |
||||
r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0) |
||||
if r1 == 0 { |
||||
if e1 != 0 { |
||||
err = errnoErr(e1) |
||||
} else { |
||||
err = syscall.EINVAL |
||||
} |
||||
} |
||||
return |
||||
} |
||||
|
||||
func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) { |
||||
var _p0 uint32 |
||||
if wait { |
||||
_p0 = 1 |
||||
} else { |
||||
_p0 = 0 |
||||
} |
||||
r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0) |
||||
if r1 == 0 { |
||||
if e1 != 0 { |
||||
err = errnoErr(e1) |
||||
} else { |
||||
err = syscall.EINVAL |
||||
} |
||||
} |
||||
return |
||||
} |
||||
|
||||
func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) { |
||||
r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0) |
||||
if r1 == 0 { |
||||
if e1 != 0 { |
||||
err = errnoErr(e1) |
||||
} else { |
||||
err = syscall.EINVAL |
||||
} |
||||
} |
||||
return |
||||
} |
||||
|
||||
func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { |
||||
var _p0 *uint16 |
||||
_p0, err = syscall.UTF16PtrFromString(name) |
||||
if err != nil { |
||||
return |
||||
} |
||||
return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa) |
||||
} |
||||
|
||||
func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { |
||||
r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) |
||||
handle = syscall.Handle(r0) |
||||
if handle == syscall.InvalidHandle { |
||||
if e1 != 0 { |
||||
err = errnoErr(e1) |
||||
} else { |
||||
err = syscall.EINVAL |
||||
} |
||||
} |
||||
return |
||||
} |
||||
|
||||
func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { |
||||
var _p0 *uint16 |
||||
_p0, err = syscall.UTF16PtrFromString(name) |
||||
if err != nil { |
||||
return |
||||
} |
||||
return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile) |
||||
} |
||||
|
||||
func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { |
||||
r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) |
||||
handle = syscall.Handle(r0) |
||||
if handle == syscall.InvalidHandle { |
||||
if e1 != 0 { |
||||
err = errnoErr(e1) |
||||
} else { |
||||
err = syscall.EINVAL |
||||
} |
||||
} |
||||
return |
||||
} |
||||
|
||||
func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { |
||||
r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) |
||||
if r1 == 0 { |
||||
if e1 != 0 { |
||||
err = errnoErr(e1) |
||||
} else { |
||||
err = syscall.EINVAL |
||||
} |
||||
} |
||||
return |
||||
} |
||||
|
||||
func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { |
||||
r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) |
||||
if r1 == 0 { |
||||
if e1 != 0 { |
||||
err = errnoErr(e1) |
||||
} else { |
||||
err = syscall.EINVAL |
||||
} |
||||
} |
||||
return |
||||
} |
||||
|
||||
func localAlloc(uFlags uint32, length uint32) (ptr uintptr) { |
||||
r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0) |
||||
ptr = uintptr(r0) |
||||
return |
||||
} |
||||
|
||||
func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) { |
||||
r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0) |
||||
status = ntstatus(r0) |
||||
return |
||||
} |
||||
|
||||
func rtlNtStatusToDosError(status ntstatus) (winerr error) { |
||||
r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0) |
||||
if r0 != 0 { |
||||
winerr = syscall.Errno(r0) |
||||
} |
||||
return |
||||
} |
||||
|
||||
func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) { |
||||
r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0) |
||||
status = ntstatus(r0) |
||||
return |
||||
} |
||||
|
||||
func rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) { |
||||
r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0) |
||||
status = ntstatus(r0) |
||||
return |
||||
} |
||||
|
||||
func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { |
||||
var _p0 *uint16 |
||||
_p0, err = syscall.UTF16PtrFromString(accountName) |
||||
if err != nil { |
||||
return |
||||
} |
||||
return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse) |
||||
} |
||||
|
||||
func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { |
||||
r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0) |
||||
if r1 == 0 { |
||||
if e1 != 0 { |
||||
err = errnoErr(e1) |
||||
} else { |
||||
err = syscall.EINVAL |
||||
} |
||||
} |
||||
return |
||||
} |
||||
|
||||
func convertSidToStringSid(sid *byte, str **uint16) (err error) { |
||||
r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0) |
||||
if r1 == 0 { |
||||
if e1 != 0 { |
||||
err = errnoErr(e1) |
||||
} else { |
||||
err = syscall.EINVAL |
||||
} |
||||
} |
||||
return |
||||
} |
||||
|
||||
func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) { |
||||
var _p0 *uint16 |
||||
_p0, err = syscall.UTF16PtrFromString(str) |
||||
if err != nil { |
||||
return |
||||
} |
||||
return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) |
||||
} |
||||
|
||||
// _convertStringSecurityDescriptorToSecurityDescriptor invokes
// ConvertStringSecurityDescriptorToSecurityDescriptorW; zero return means
// failure, mapped via errnoErr with EINVAL fallback.
func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) {
	r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0)
	if r1 == 0 {
		if e1 != 0 {
			err = errnoErr(e1)
		} else {
			err = syscall.EINVAL
		}
	}
	return
}
||||
|
||||
// convertSecurityDescriptorToStringSecurityDescriptor is the inverse
// conversion: binary security descriptor sd -> SDDL string *sddl, with
// secInfo selecting which descriptor components to include.
func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) {
	r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0)
	if r1 == 0 {
		if e1 != 0 {
			err = errnoErr(e1)
		} else {
			err = syscall.EINVAL
		}
	}
	return
}
||||
|
||||
// localFree releases a Windows-allocated buffer via LocalFree. The API's
// return value is deliberately ignored (generated no-error wrapper).
func localFree(mem uintptr) {
	syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0)
	return
}
||||
|
||||
// getSecurityDescriptorLength returns the byte length of the security
// descriptor sd via GetSecurityDescriptorLength. (The result name `len`
// shadows the builtin — harmless here, and fixed by the generator's naming.)
func getSecurityDescriptorLength(sd uintptr) (len uint32) {
	r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0)
	len = uint32(r0)
	return
}
||||
|
||||
// getFileInformationByHandleEx reads file metadata of the given information
// class into buffer (size bytes) for handle h via GetFileInformationByHandleEx.
func getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) {
	r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0)
	if r1 == 0 {
		if e1 != 0 {
			err = errnoErr(e1)
		} else {
			err = syscall.EINVAL
		}
	}
	return
}
||||
|
||||
// setFileInformationByHandle writes file metadata of the given information
// class from buffer (size bytes) for handle h via SetFileInformationByHandle.
func setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) {
	r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0)
	if r1 == 0 {
		if e1 != 0 {
			err = errnoErr(e1)
		} else {
			err = syscall.EINVAL
		}
	}
	return
}
||||
|
||||
// adjustTokenPrivileges wraps AdjustTokenPrivileges; releaseAll is marshaled
// to the Win32 BOOL _p0. Unlike the other wrappers, err is populated
// unconditionally (the generated `if true` branch): this API can report a
// nonzero last-error even on a nonzero return — presumably so callers can
// observe partial-success codes such as ERROR_NOT_ALL_ASSIGNED; callers must
// therefore check both success and err.
func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) {
	var _p0 uint32
	if releaseAll {
		_p0 = 1
	} else {
		_p0 = 0
	}
	r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize)))
	success = r0 != 0
	if true {
		if e1 != 0 {
			err = errnoErr(e1)
		} else {
			err = syscall.EINVAL
		}
	}
	return
}
||||
|
||||
// impersonateSelf wraps ImpersonateSelf, attaching an impersonation token of
// the given security level to the calling thread.
func impersonateSelf(level uint32) (err error) {
	r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0)
	if r1 == 0 {
		if e1 != 0 {
			err = errnoErr(e1)
		} else {
			err = syscall.EINVAL
		}
	}
	return
}
||||
|
||||
// revertToSelf wraps RevertToSelf, ending any impersonation on the calling
// thread (the counterpart to impersonateSelf).
func revertToSelf() (err error) {
	r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0)
	if r1 == 0 {
		if e1 != 0 {
			err = errnoErr(e1)
		} else {
			err = syscall.EINVAL
		}
	}
	return
}
||||
|
||||
// openThreadToken wraps OpenThreadToken, opening the access token associated
// with thread. openAsSelf is marshaled to a Win32 BOOL; the resulting token
// handle is written through token.
func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) {
	var _p0 uint32
	if openAsSelf {
		_p0 = 1
	} else {
		_p0 = 0
	}
	r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0)
	if r1 == 0 {
		if e1 != 0 {
			err = errnoErr(e1)
		} else {
			err = syscall.EINVAL
		}
	}
	return
}
||||
|
||||
// getCurrentThread returns the pseudo-handle for the calling thread via
// GetCurrentThread. No error path: the API cannot fail.
func getCurrentThread() (h syscall.Handle) {
	r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0)
	h = syscall.Handle(r0)
	return
}
||||
|
||||
// lookupPrivilegeValue converts both string arguments to UTF-16 and resolves
// the named privilege to its LUID via LookupPrivilegeValueW. Note an empty
// systemName yields a pointer to an empty UTF-16 string, not nil — the API
// appears to accept that as "local system"; confirm against callers.
func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) {
	var _p0 *uint16
	_p0, err = syscall.UTF16PtrFromString(systemName)
	if err != nil {
		return
	}
	var _p1 *uint16
	_p1, err = syscall.UTF16PtrFromString(name)
	if err != nil {
		return
	}
	return _lookupPrivilegeValue(_p0, _p1, luid)
}
||||
|
||||
// _lookupPrivilegeValue invokes advapi32 LookupPrivilegeValueW; zero return
// is failure, mapped via errnoErr with EINVAL fallback.
func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) {
	r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid)))
	if r1 == 0 {
		if e1 != 0 {
			err = errnoErr(e1)
		} else {
			err = syscall.EINVAL
		}
	}
	return
}
||||
|
||||
// lookupPrivilegeName converts systemName to UTF-16 and resolves a privilege
// LUID back to its name via LookupPrivilegeNameW (buffer/size are in-out).
func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) {
	var _p0 *uint16
	_p0, err = syscall.UTF16PtrFromString(systemName)
	if err != nil {
		return
	}
	return _lookupPrivilegeName(_p0, luid, buffer, size)
}
||||
|
||||
// _lookupPrivilegeName invokes advapi32 LookupPrivilegeNameW; zero return is
// failure, mapped via errnoErr with EINVAL fallback.
func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) {
	r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0)
	if r1 == 0 {
		if e1 != 0 {
			err = errnoErr(e1)
		} else {
			err = syscall.EINVAL
		}
	}
	return
}
||||
|
||||
// lookupPrivilegeDisplayName converts systemName to UTF-16 and fetches the
// human-readable display name for a privilege via LookupPrivilegeDisplayNameW.
func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
	var _p0 *uint16
	_p0, err = syscall.UTF16PtrFromString(systemName)
	if err != nil {
		return
	}
	return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId)
}
||||
|
||||
// _lookupPrivilegeDisplayName invokes advapi32 LookupPrivilegeDisplayNameW;
// zero return is failure, mapped via errnoErr with EINVAL fallback.
func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) {
	r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0)
	if r1 == 0 {
		if e1 != 0 {
			err = errnoErr(e1)
		} else {
			err = syscall.EINVAL
		}
	}
	return
}
||||
|
||||
// backupRead wraps BackupRead, reading backup-stream data from h into b.
// _p0 stays nil when b is empty (valid: the API then receives a NULL buffer
// with length 0); abort and processSecurity are marshaled to Win32 BOOLs.
// context carries the API's opaque per-sequence state between calls.
func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) {
	var _p0 *byte
	if len(b) > 0 {
		_p0 = &b[0]
	}
	var _p1 uint32
	if abort {
		_p1 = 1
	} else {
		_p1 = 0
	}
	var _p2 uint32
	if processSecurity {
		_p2 = 1
	} else {
		_p2 = 0
	}
	r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0)
	if r1 == 0 {
		if e1 != 0 {
			err = errnoErr(e1)
		} else {
			err = syscall.EINVAL
		}
	}
	return
}
||||
|
||||
// backupWrite wraps BackupWrite, the mirror of backupRead: it writes
// backup-stream data from b to h. Same marshaling conventions — nil buffer
// pointer for empty b, BOOLs as uint32, opaque context between calls.
func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) {
	var _p0 *byte
	if len(b) > 0 {
		_p0 = &b[0]
	}
	var _p1 uint32
	if abort {
		_p1 = 1
	} else {
		_p1 = 0
	}
	var _p2 uint32
	if processSecurity {
		_p2 = 1
	} else {
		_p2 = 0
	}
	r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0)
	if r1 == 0 {
		if e1 != 0 {
			err = errnoErr(e1)
		} else {
			err = syscall.EINVAL
		}
	}
	return
}
||||
|
||||
// bind wraps Winsock bind. Unlike the Win32 wrappers above, failure is
// signaled by r1 == socketError (SOCKET_ERROR, the Winsock convention) rather
// than by a zero return.
func bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) {
	r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen))
	if r1 == socketError {
		if e1 != 0 {
			err = errnoErr(e1)
		} else {
			err = syscall.EINVAL
		}
	}
	return
}
@ -0,0 +1,191 @@ |
||||
|
||||
Apache License |
||||
Version 2.0, January 2004 |
||||
https://www.apache.org/licenses/ |
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION |
||||
|
||||
1. Definitions. |
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction, |
||||
and distribution as defined by Sections 1 through 9 of this document. |
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by |
||||
the copyright owner that is granting the License. |
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all |
||||
other entities that control, are controlled by, or are under common |
||||
control with that entity. For the purposes of this definition, |
||||
"control" means (i) the power, direct or indirect, to cause the |
||||
direction or management of such entity, whether by contract or |
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the |
||||
outstanding shares, or (iii) beneficial ownership of such entity. |
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity |
||||
exercising permissions granted by this License. |
||||
|
||||
"Source" form shall mean the preferred form for making modifications, |
||||
including but not limited to software source code, documentation |
||||
source, and configuration files. |
||||
|
||||
"Object" form shall mean any form resulting from mechanical |
||||
transformation or translation of a Source form, including but |
||||
not limited to compiled object code, generated documentation, |
||||
and conversions to other media types. |
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or |
||||
Object form, made available under the License, as indicated by a |
||||
copyright notice that is included in or attached to the work |
||||
(an example is provided in the Appendix below). |
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object |
||||
form, that is based on (or derived from) the Work and for which the |
||||
editorial revisions, annotations, elaborations, or other modifications |
||||
represent, as a whole, an original work of authorship. For the purposes |
||||
of this License, Derivative Works shall not include works that remain |
||||
separable from, or merely link (or bind by name) to the interfaces of, |
||||
the Work and Derivative Works thereof. |
||||
|
||||
"Contribution" shall mean any work of authorship, including |
||||
the original version of the Work and any modifications or additions |
||||
to that Work or Derivative Works thereof, that is intentionally |
||||
submitted to Licensor for inclusion in the Work by the copyright owner |
||||
or by an individual or Legal Entity authorized to submit on behalf of |
||||
the copyright owner. For the purposes of this definition, "submitted" |
||||
means any form of electronic, verbal, or written communication sent |
||||
to the Licensor or its representatives, including but not limited to |
||||
communication on electronic mailing lists, source code control systems, |
||||
and issue tracking systems that are managed by, or on behalf of, the |
||||
Licensor for the purpose of discussing and improving the Work, but |
||||
excluding communication that is conspicuously marked or otherwise |
||||
designated in writing by the copyright owner as "Not a Contribution." |
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity |
||||
on behalf of whom a Contribution has been received by Licensor and |
||||
subsequently incorporated within the Work. |
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of |
||||
this License, each Contributor hereby grants to You a perpetual, |
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
||||
copyright license to reproduce, prepare Derivative Works of, |
||||
publicly display, publicly perform, sublicense, and distribute the |
||||
Work and such Derivative Works in Source or Object form. |
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of |
||||
this License, each Contributor hereby grants to You a perpetual, |
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
||||
(except as stated in this section) patent license to make, have made, |
||||
use, offer to sell, sell, import, and otherwise transfer the Work, |
||||
where such license applies only to those patent claims licensable |
||||
by such Contributor that are necessarily infringed by their |
||||
Contribution(s) alone or by combination of their Contribution(s) |
||||
with the Work to which such Contribution(s) was submitted. If You |
||||
institute patent litigation against any entity (including a |
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work |
||||
or a Contribution incorporated within the Work constitutes direct |
||||
or contributory patent infringement, then any patent licenses |
||||
granted to You under this License for that Work shall terminate |
||||
as of the date such litigation is filed. |
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the |
||||
Work or Derivative Works thereof in any medium, with or without |
||||
modifications, and in Source or Object form, provided that You |
||||
meet the following conditions: |
||||
|
||||
(a) You must give any other recipients of the Work or |
||||
Derivative Works a copy of this License; and |
||||
|
||||
(b) You must cause any modified files to carry prominent notices |
||||
stating that You changed the files; and |
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works |
||||
that You distribute, all copyright, patent, trademark, and |
||||
attribution notices from the Source form of the Work, |
||||
excluding those notices that do not pertain to any part of |
||||
the Derivative Works; and |
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its |
||||
distribution, then any Derivative Works that You distribute must |
||||
include a readable copy of the attribution notices contained |
||||
within such NOTICE file, excluding those notices that do not |
||||
pertain to any part of the Derivative Works, in at least one |
||||
of the following places: within a NOTICE text file distributed |
||||
as part of the Derivative Works; within the Source form or |
||||
documentation, if provided along with the Derivative Works; or, |
||||
within a display generated by the Derivative Works, if and |
||||
wherever such third-party notices normally appear. The contents |
||||
of the NOTICE file are for informational purposes only and |
||||
do not modify the License. You may add Your own attribution |
||||
notices within Derivative Works that You distribute, alongside |
||||
or as an addendum to the NOTICE text from the Work, provided |
||||
that such additional attribution notices cannot be construed |
||||
as modifying the License. |
||||
|
||||
You may add Your own copyright statement to Your modifications and |
||||
may provide additional or different license terms and conditions |
||||
for use, reproduction, or distribution of Your modifications, or |
||||
for any such Derivative Works as a whole, provided Your use, |
||||
reproduction, and distribution of the Work otherwise complies with |
||||
the conditions stated in this License. |
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise, |
||||
any Contribution intentionally submitted for inclusion in the Work |
||||
by You to the Licensor shall be under the terms and conditions of |
||||
this License, without any additional terms or conditions. |
||||
Notwithstanding the above, nothing herein shall supersede or modify |
||||
the terms of any separate license agreement you may have executed |
||||
with Licensor regarding such Contributions. |
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade |
||||
names, trademarks, service marks, or product names of the Licensor, |
||||
except as required for reasonable and customary use in describing the |
||||
origin of the Work and reproducing the content of the NOTICE file. |
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or |
||||
agreed to in writing, Licensor provides the Work (and each |
||||
Contributor provides its Contributions) on an "AS IS" BASIS, |
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or |
||||
implied, including, without limitation, any warranties or conditions |
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A |
||||
PARTICULAR PURPOSE. You are solely responsible for determining the |
||||
appropriateness of using or redistributing the Work and assume any |
||||
risks associated with Your exercise of permissions under this License. |
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory, |
||||
whether in tort (including negligence), contract, or otherwise, |
||||
unless required by applicable law (such as deliberate and grossly |
||||
negligent acts) or agreed to in writing, shall any Contributor be |
||||
liable to You for damages, including any direct, indirect, special, |
||||
incidental, or consequential damages of any character arising as a |
||||
result of this License or out of the use or inability to use the |
||||
Work (including but not limited to damages for loss of goodwill, |
||||
work stoppage, computer failure or malfunction, or any and all |
||||
other commercial damages or losses), even if such Contributor |
||||
has been advised of the possibility of such damages. |
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing |
||||
the Work or Derivative Works thereof, You may choose to offer, |
||||
and charge a fee for, acceptance of support, warranty, indemnity, |
||||
or other liability obligations and/or rights consistent with this |
||||
License. However, in accepting such obligations, You may act only |
||||
on Your own behalf and on Your sole responsibility, not on behalf |
||||
of any other Contributor, and only if You agree to indemnify, |
||||
defend, and hold each Contributor harmless for any liability |
||||
incurred by, or claims asserted against, such Contributor by reason |
||||
of your accepting any such warranty or additional liability. |
||||
|
||||
END OF TERMS AND CONDITIONS |
||||
|
||||
Copyright The containerd Authors |
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); |
||||
you may not use this file except in compliance with the License. |
||||
You may obtain a copy of the License at |
||||
|
||||
https://www.apache.org/licenses/LICENSE-2.0 |
||||
|
||||
Unless required by applicable law or agreed to in writing, software |
||||
distributed under the License is distributed on an "AS IS" BASIS, |
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
See the License for the specific language governing permissions and |
||||
limitations under the License. |
@ -0,0 +1,16 @@ |
||||
Docker |
||||
Copyright 2012-2015 Docker, Inc. |
||||
|
||||
This product includes software developed at Docker, Inc. (https://www.docker.com). |
||||
|
||||
The following is courtesy of our legal counsel: |
||||
|
||||
|
||||
Use and transfer of Docker may be subject to certain restrictions by the |
||||
United States and other governments. |
||||
It is your responsibility to ensure that your use and/or transfer does not |
||||
violate applicable laws. |
||||
|
||||
For more information, please see https://www.bis.doc.gov |
||||
|
||||
See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. |
@ -0,0 +1,78 @@ |
||||
/* |
||||
Copyright The containerd Authors. |
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); |
||||
you may not use this file except in compliance with the License. |
||||
You may obtain a copy of the License at |
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software |
||||
distributed under the License is distributed on an "AS IS" BASIS, |
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
See the License for the specific language governing permissions and |
||||
limitations under the License. |
||||
*/ |
||||
|
||||
// Package errdefs defines the common errors used throughout containerd
|
||||
// packages.
|
||||
//
|
||||
// Use with errors.Wrap and error.Wrapf to add context to an error.
|
||||
//
|
||||
// To detect an error class, use the IsXXX functions to tell whether an error
|
||||
// is of a certain type.
|
||||
//
|
||||
// The functions ToGRPC and FromGRPC can be used to map server-side and
|
||||
// client-side errors to the correct types.
|
||||
package errdefs |
||||
|
||||
import "github.com/pkg/errors" |
||||
|
||||
// Definitions of common error types used throughout containerd. All containerd
|
||||
// errors returned by most packages will map into one of these errors classes.
|
||||
// Packages should return errors of these types when they want to instruct a
|
||||
// client to take a particular action.
|
||||
//
|
||||
// For the most part, we just try to provide local grpc errors. Most conditions
|
||||
// map very well to those defined by grpc.
|
||||
var (
	ErrUnknown            = errors.New("unknown") // used internally to represent a missed mapping.
	ErrInvalidArgument    = errors.New("invalid argument")    // caller supplied a bad parameter
	ErrNotFound           = errors.New("not found")           // requested object does not exist
	ErrAlreadyExists      = errors.New("already exists")      // object creation conflicts with an existing one
	ErrFailedPrecondition = errors.New("failed precondition") // operation rejected in the current state
	ErrUnavailable        = errors.New("unavailable")         // resource temporarily unavailable
	ErrNotImplemented     = errors.New("not implemented")     // represents not supported and unimplemented
)
||||
|
||||
// IsInvalidArgument returns true if the error is due to an invalid argument
|
||||
func IsInvalidArgument(err error) bool { |
||||
return errors.Cause(err) == ErrInvalidArgument |
||||
} |
||||
|
||||
// IsNotFound returns true if the error is due to a missing object
|
||||
func IsNotFound(err error) bool { |
||||
return errors.Cause(err) == ErrNotFound |
||||
} |
||||
|
||||
// IsAlreadyExists returns true if the error is due to an already existing
|
||||
// metadata item
|
||||
func IsAlreadyExists(err error) bool { |
||||
return errors.Cause(err) == ErrAlreadyExists |
||||
} |
||||
|
||||
// IsFailedPrecondition returns true if an operation could not proceed to the
|
||||
// lack of a particular condition
|
||||
func IsFailedPrecondition(err error) bool { |
||||
return errors.Cause(err) == ErrFailedPrecondition |
||||
} |
||||
|
||||
// IsUnavailable returns true if the error is due to a resource being unavailable
|
||||
func IsUnavailable(err error) bool { |
||||
return errors.Cause(err) == ErrUnavailable |
||||
} |
||||
|
||||
// IsNotImplemented returns true if the error is due to not being implemented
|
||||
func IsNotImplemented(err error) bool { |
||||
return errors.Cause(err) == ErrNotImplemented |
||||
} |
@ -0,0 +1,138 @@ |
||||
/* |
||||
Copyright The containerd Authors. |
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); |
||||
you may not use this file except in compliance with the License. |
||||
You may obtain a copy of the License at |
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software |
||||
distributed under the License is distributed on an "AS IS" BASIS, |
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
See the License for the specific language governing permissions and |
||||
limitations under the License. |
||||
*/ |
||||
|
||||
package errdefs |
||||
|
||||
import ( |
||||
"strings" |
||||
|
||||
"github.com/pkg/errors" |
||||
"google.golang.org/grpc/codes" |
||||
"google.golang.org/grpc/status" |
||||
) |
||||
|
||||
// ToGRPC will attempt to map the backend containerd error into a grpc error,
|
||||
// using the original error message as a description.
|
||||
//
|
||||
// Further information may be extracted from certain errors depending on their
|
||||
// type.
|
||||
//
|
||||
// If the error is unmapped, the original error will be returned to be handled
|
||||
// by the regular grpc error handling stack.
|
||||
func ToGRPC(err error) error { |
||||
if err == nil { |
||||
return nil |
||||
} |
||||
|
||||
if isGRPCError(err) { |
||||
// error has already been mapped to grpc
|
||||
return err |
||||
} |
||||
|
||||
switch { |
||||
case IsInvalidArgument(err): |
||||
return status.Errorf(codes.InvalidArgument, err.Error()) |
||||
case IsNotFound(err): |
||||
return status.Errorf(codes.NotFound, err.Error()) |
||||
case IsAlreadyExists(err): |
||||
return status.Errorf(codes.AlreadyExists, err.Error()) |
||||
case IsFailedPrecondition(err): |
||||
return status.Errorf(codes.FailedPrecondition, err.Error()) |
||||
case IsUnavailable(err): |
||||
return status.Errorf(codes.Unavailable, err.Error()) |
||||
case IsNotImplemented(err): |
||||
return status.Errorf(codes.Unimplemented, err.Error()) |
||||
} |
||||
|
||||
return err |
||||
} |
||||
|
||||
// ToGRPCf maps the error to grpc error codes, assembling the formatting string
|
||||
// and combining it with the target error string.
|
||||
//
|
||||
// This is equivalent to errors.ToGRPC(errors.Wrapf(err, format, args...))
|
||||
func ToGRPCf(err error, format string, args ...interface{}) error { |
||||
return ToGRPC(errors.Wrapf(err, format, args...)) |
||||
} |
||||
|
||||
// FromGRPC returns the underlying error from a grpc service based on the grpc error code
|
||||
func FromGRPC(err error) error { |
||||
if err == nil { |
||||
return nil |
||||
} |
||||
|
||||
var cls error // divide these into error classes, becomes the cause
|
||||
|
||||
switch code(err) { |
||||
case codes.InvalidArgument: |
||||
cls = ErrInvalidArgument |
||||
case codes.AlreadyExists: |
||||
cls = ErrAlreadyExists |
||||
case codes.NotFound: |
||||
cls = ErrNotFound |
||||
case codes.Unavailable: |
||||
cls = ErrUnavailable |
||||
case codes.FailedPrecondition: |
||||
cls = ErrFailedPrecondition |
||||
case codes.Unimplemented: |
||||
cls = ErrNotImplemented |
||||
default: |
||||
cls = ErrUnknown |
||||
} |
||||
|
||||
msg := rebaseMessage(cls, err) |
||||
if msg != "" { |
||||
err = errors.Wrapf(cls, msg) |
||||
} else { |
||||
err = errors.WithStack(cls) |
||||
} |
||||
|
||||
return err |
||||
} |
||||
|
||||
// rebaseMessage removes the repeats for an error at the end of an error
|
||||
// string. This will happen when taking an error over grpc then remapping it.
|
||||
//
|
||||
// Effectively, we just remove the string of cls from the end of err if it
|
||||
// appears there.
|
||||
func rebaseMessage(cls error, err error) string { |
||||
desc := errDesc(err) |
||||
clss := cls.Error() |
||||
if desc == clss { |
||||
return "" |
||||
} |
||||
|
||||
return strings.TrimSuffix(desc, ": "+clss) |
||||
} |
||||
|
||||
func isGRPCError(err error) bool { |
||||
_, ok := status.FromError(err) |
||||
return ok |
||||
} |
||||
|
||||
func code(err error) codes.Code { |
||||
if s, ok := status.FromError(err); ok { |
||||
return s.Code() |
||||
} |
||||
return codes.Unknown |
||||
} |
||||
|
||||
func errDesc(err error) string { |
||||
if s, ok := status.FromError(err); ok { |
||||
return s.Message() |
||||
} |
||||
return err.Error() |
||||
} |
@ -0,0 +1,202 @@ |
||||
Apache License |
||||
Version 2.0, January 2004 |
||||
http://www.apache.org/licenses/ |
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION |
||||
|
||||
1. Definitions. |
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction, |
||||
and distribution as defined by Sections 1 through 9 of this document. |
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by |
||||
the copyright owner that is granting the License. |
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all |
||||
other entities that control, are controlled by, or are under common |
||||
control with that entity. For the purposes of this definition, |
||||
"control" means (i) the power, direct or indirect, to cause the |
||||
direction or management of such entity, whether by contract or |
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the |
||||
outstanding shares, or (iii) beneficial ownership of such entity. |
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity |
||||
exercising permissions granted by this License. |
||||
|
||||
"Source" form shall mean the preferred form for making modifications, |
||||
including but not limited to software source code, documentation |
||||
source, and configuration files. |
||||
|
||||
"Object" form shall mean any form resulting from mechanical |
||||
transformation or translation of a Source form, including but |
||||
not limited to compiled object code, generated documentation, |
||||
and conversions to other media types. |
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or |
||||
Object form, made available under the License, as indicated by a |
||||
copyright notice that is included in or attached to the work |
||||
(an example is provided in the Appendix below). |
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object |
||||
form, that is based on (or derived from) the Work and for which the |
||||
editorial revisions, annotations, elaborations, or other modifications |
||||
represent, as a whole, an original work of authorship. For the purposes |
||||
of this License, Derivative Works shall not include works that remain |
||||
separable from, or merely link (or bind by name) to the interfaces of, |
||||
the Work and Derivative Works thereof. |
||||
|
||||
"Contribution" shall mean any work of authorship, including |
||||
the original version of the Work and any modifications or additions |
||||
to that Work or Derivative Works thereof, that is intentionally |
||||
submitted to Licensor for inclusion in the Work by the copyright owner |
||||
or by an individual or Legal Entity authorized to submit on behalf of |
||||
the copyright owner. For the purposes of this definition, "submitted" |
||||
means any form of electronic, verbal, or written communication sent |
||||
to the Licensor or its representatives, including but not limited to |
||||
communication on electronic mailing lists, source code control systems, |
||||
and issue tracking systems that are managed by, or on behalf of, the |
||||
Licensor for the purpose of discussing and improving the Work, but |
||||
excluding communication that is conspicuously marked or otherwise |
||||
designated in writing by the copyright owner as "Not a Contribution." |
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity |
||||
on behalf of whom a Contribution has been received by Licensor and |
||||
subsequently incorporated within the Work. |
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of |
||||
this License, each Contributor hereby grants to You a perpetual, |
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
||||
copyright license to reproduce, prepare Derivative Works of, |
||||
publicly display, publicly perform, sublicense, and distribute the |
||||
Work and such Derivative Works in Source or Object form. |
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of |
||||
this License, each Contributor hereby grants to You a perpetual, |
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
||||
(except as stated in this section) patent license to make, have made, |
||||
use, offer to sell, sell, import, and otherwise transfer the Work, |
||||
where such license applies only to those patent claims licensable |
||||
by such Contributor that are necessarily infringed by their |
||||
Contribution(s) alone or by combination of their Contribution(s) |
||||
with the Work to which such Contribution(s) was submitted. If You |
||||
institute patent litigation against any entity (including a |
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work |
||||
or a Contribution incorporated within the Work constitutes direct |
||||
or contributory patent infringement, then any patent licenses |
||||
granted to You under this License for that Work shall terminate |
||||
as of the date such litigation is filed. |
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the |
||||
Work or Derivative Works thereof in any medium, with or without |
||||
modifications, and in Source or Object form, provided that You |
||||
meet the following conditions: |
||||
|
||||
(a) You must give any other recipients of the Work or |
||||
Derivative Works a copy of this License; and |
||||
|
||||
(b) You must cause any modified files to carry prominent notices |
||||
stating that You changed the files; and |
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works |
||||
that You distribute, all copyright, patent, trademark, and |
||||
attribution notices from the Source form of the Work, |
||||
excluding those notices that do not pertain to any part of |
||||
the Derivative Works; and |
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its |
||||
distribution, then any Derivative Works that You distribute must |
||||
include a readable copy of the attribution notices contained |
||||
within such NOTICE file, excluding those notices that do not |
||||
pertain to any part of the Derivative Works, in at least one |
||||
of the following places: within a NOTICE text file distributed |
||||
as part of the Derivative Works; within the Source form or |
||||
documentation, if provided along with the Derivative Works; or, |
||||
within a display generated by the Derivative Works, if and |
||||
wherever such third-party notices normally appear. The contents |
||||
of the NOTICE file are for informational purposes only and |
||||
do not modify the License. You may add Your own attribution |
||||
notices within Derivative Works that You distribute, alongside |
||||
or as an addendum to the NOTICE text from the Work, provided |
||||
that such additional attribution notices cannot be construed |
||||
as modifying the License. |
||||
|
||||
You may add Your own copyright statement to Your modifications and |
||||
may provide additional or different license terms and conditions |
||||
for use, reproduction, or distribution of Your modifications, or |
||||
for any such Derivative Works as a whole, provided Your use, |
||||
reproduction, and distribution of the Work otherwise complies with |
||||
the conditions stated in this License. |
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise, |
||||
any Contribution intentionally submitted for inclusion in the Work |
||||
by You to the Licensor shall be under the terms and conditions of |
||||
this License, without any additional terms or conditions. |
||||
Notwithstanding the above, nothing herein shall supersede or modify |
||||
the terms of any separate license agreement you may have executed |
||||
with Licensor regarding such Contributions. |
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade |
||||
names, trademarks, service marks, or product names of the Licensor, |
||||
except as required for reasonable and customary use in describing the |
||||
origin of the Work and reproducing the content of the NOTICE file. |
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or |
||||
agreed to in writing, Licensor provides the Work (and each |
||||
Contributor provides its Contributions) on an "AS IS" BASIS, |
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or |
||||
implied, including, without limitation, any warranties or conditions |
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A |
||||
PARTICULAR PURPOSE. You are solely responsible for determining the |
||||
appropriateness of using or redistributing the Work and assume any |
||||
risks associated with Your exercise of permissions under this License. |
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory, |
||||
whether in tort (including negligence), contract, or otherwise, |
||||
unless required by applicable law (such as deliberate and grossly |
||||
negligent acts) or agreed to in writing, shall any Contributor be |
||||
liable to You for damages, including any direct, indirect, special, |
||||
incidental, or consequential damages of any character arising as a |
||||
result of this License or out of the use or inability to use the |
||||
Work (including but not limited to damages for loss of goodwill, |
||||
work stoppage, computer failure or malfunction, or any and all |
||||
other commercial damages or losses), even if such Contributor |
||||
has been advised of the possibility of such damages. |
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing |
||||
the Work or Derivative Works thereof, You may choose to offer, |
||||
and charge a fee for, acceptance of support, warranty, indemnity, |
||||
or other liability obligations and/or rights consistent with this |
||||
License. However, in accepting such obligations, You may act only |
||||
on Your own behalf and on Your sole responsibility, not on behalf |
||||
of any other Contributor, and only if You agree to indemnify, |
||||
defend, and hold each Contributor harmless for any liability |
||||
incurred by, or claims asserted against, such Contributor by reason |
||||
of your accepting any such warranty or additional liability. |
||||
|
||||
END OF TERMS AND CONDITIONS |
||||
|
||||
APPENDIX: How to apply the Apache License to your work. |
||||
|
||||
To apply the Apache License to your work, attach the following |
||||
boilerplate notice, with the fields enclosed by brackets "{}" |
||||
replaced with your own identifying information. (Don't include |
||||
the brackets!) The text should be enclosed in the appropriate |
||||
comment syntax for the file format. We also recommend that a |
||||
file or class name and description of purpose be included on the |
||||
same "printed page" as the copyright notice for easier |
||||
identification within third-party archives. |
||||
|
||||
Copyright {yyyy} {name of copyright owner} |
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); |
||||
you may not use this file except in compliance with the License. |
||||
You may obtain a copy of the License at |
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0 |
||||
|
||||
Unless required by applicable law or agreed to in writing, software |
||||
distributed under the License is distributed on an "AS IS" BASIS, |
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
See the License for the specific language governing permissions and |
||||
limitations under the License. |
||||
|
@ -0,0 +1,247 @@ |
||||
package digestset |
||||
|
||||
import ( |
||||
"errors" |
||||
"sort" |
||||
"strings" |
||||
"sync" |
||||
|
||||
digest "github.com/opencontainers/go-digest" |
||||
) |
||||
|
||||
var ( |
||||
// ErrDigestNotFound is used when a matching digest
|
||||
// could not be found in a set.
|
||||
ErrDigestNotFound = errors.New("digest not found") |
||||
|
||||
// ErrDigestAmbiguous is used when multiple digests
|
||||
// are found in a set. None of the matching digests
|
||||
// should be considered valid matches.
|
||||
ErrDigestAmbiguous = errors.New("ambiguous digest string") |
||||
) |
||||
|
||||
// Set is used to hold a unique set of digests which
// may be easily referenced by a string
// representation of the digest as well as short representation.
// The uniqueness of the short representation is based on other
// digests in the set. If digests are omitted from this set,
// collisions in a larger set may not be detected, therefore it
// is important to always do short representation lookups on
// the complete set of digests. To mitigate collisions, an
// appropriately long short code should be used.
type Set struct {
	mutex   sync.RWMutex  // guards entries for concurrent use
	entries digestEntries // kept sorted by (val, alg); Lookup/Add/Remove binary-search on this order
}
||||
|
||||
// NewSet creates an empty set of digests
|
||||
// which may have digests added.
|
||||
func NewSet() *Set { |
||||
return &Set{ |
||||
entries: digestEntries{}, |
||||
} |
||||
} |
||||
|
||||
// checkShortMatch checks whether two digests match as either whole
|
||||
// values or short values. This function does not test equality,
|
||||
// rather whether the second value could match against the first
|
||||
// value.
|
||||
func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool { |
||||
if len(hex) == len(shortHex) { |
||||
if hex != shortHex { |
||||
return false |
||||
} |
||||
if len(shortAlg) > 0 && string(alg) != shortAlg { |
||||
return false |
||||
} |
||||
} else if !strings.HasPrefix(hex, shortHex) { |
||||
return false |
||||
} else if len(shortAlg) > 0 && string(alg) != shortAlg { |
||||
return false |
||||
} |
||||
return true |
||||
} |
||||
|
||||
// Lookup looks for a digest matching the given string representation.
// If no digests could be found ErrDigestNotFound will be returned
// with an empty digest value. If multiple matches are found
// ErrDigestAmbiguous will be returned with an empty digest value.
func (dst *Set) Lookup(d string) (digest.Digest, error) {
	dst.mutex.RLock()
	defer dst.mutex.RUnlock()
	if len(dst.entries) == 0 {
		return "", ErrDigestNotFound
	}
	var (
		searchFunc func(int) bool
		alg        digest.Algorithm
		hex        string
	)
	dgst, err := digest.Parse(d)
	if err == digest.ErrDigestInvalidFormat {
		// Not a fully-qualified digest: treat d as a (possibly short)
		// hex value with no algorithm constraint.
		hex = d
		searchFunc = func(i int) bool {
			return dst.entries[i].val >= d
		}
	} else {
		// Fully-qualified digest: search by (val, alg), the same ordering
		// the entries slice is kept sorted in by Add.
		hex = dgst.Hex()
		alg = dgst.Algorithm()
		searchFunc = func(i int) bool {
			if dst.entries[i].val == hex {
				return dst.entries[i].alg >= alg
			}
			return dst.entries[i].val >= hex
		}
	}
	// Binary search for the first entry that could match.
	idx := sort.Search(len(dst.entries), searchFunc)
	if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) {
		return "", ErrDigestNotFound
	}
	if dst.entries[idx].alg == alg && dst.entries[idx].val == hex {
		// Exact match wins without an ambiguity check.
		return dst.entries[idx].digest, nil
	}
	// Entries are sorted, so a second short-form match, if any, is the
	// immediately following entry.
	if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) {
		return "", ErrDigestAmbiguous
	}

	return dst.entries[idx].digest, nil
}
||||
|
||||
// Add adds the given digest to the set. An error will be returned
// if the given digest is invalid. If the digest already exists in the
// set, this operation will be a no-op.
func (dst *Set) Add(d digest.Digest) error {
	if err := d.Validate(); err != nil {
		return err
	}
	dst.mutex.Lock()
	defer dst.mutex.Unlock()
	entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
	// Locate the sorted insertion point by (val, alg) — the same ordering
	// Lookup's binary search relies on.
	searchFunc := func(i int) bool {
		if dst.entries[i].val == entry.val {
			return dst.entries[i].alg >= entry.alg
		}
		return dst.entries[i].val >= entry.val
	}
	idx := sort.Search(len(dst.entries), searchFunc)
	if idx == len(dst.entries) {
		// Greater than every existing entry: append at the end.
		dst.entries = append(dst.entries, entry)
		return nil
	} else if dst.entries[idx].digest == d {
		// Already present: no-op.
		return nil
	}

	// Shift the tail right by one slot and insert the new entry in place.
	entries := append(dst.entries, nil)
	copy(entries[idx+1:], entries[idx:len(entries)-1])
	entries[idx] = entry
	dst.entries = entries
	return nil
}
||||
|
||||
// Remove removes the given digest from the set. An err will be
// returned if the given digest is invalid. If the digest does
// not exist in the set, this operation will be a no-op.
func (dst *Set) Remove(d digest.Digest) error {
	if err := d.Validate(); err != nil {
		return err
	}
	dst.mutex.Lock()
	defer dst.mutex.Unlock()
	entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
	// Binary-search for the entry position using the same (val, alg)
	// ordering used by Add.
	searchFunc := func(i int) bool {
		if dst.entries[i].val == entry.val {
			return dst.entries[i].alg >= entry.alg
		}
		return dst.entries[i].val >= entry.val
	}
	idx := sort.Search(len(dst.entries), searchFunc)
	// Not found if idx is after or value at idx is not digest
	if idx == len(dst.entries) || dst.entries[idx].digest != d {
		return nil
	}

	// Shift the tail left over the removed entry and truncate.
	entries := dst.entries
	copy(entries[idx:], entries[idx+1:])
	entries = entries[:len(entries)-1]
	dst.entries = entries

	return nil
}
||||
|
||||
// All returns all the digests in the set
|
||||
func (dst *Set) All() []digest.Digest { |
||||
dst.mutex.RLock() |
||||
defer dst.mutex.RUnlock() |
||||
retValues := make([]digest.Digest, len(dst.entries)) |
||||
for i := range dst.entries { |
||||
retValues[i] = dst.entries[i].digest |
||||
} |
||||
|
||||
return retValues |
||||
} |
||||
|
||||
// ShortCodeTable returns a map of Digest to unique short codes. The
// length represents the minimum value, the maximum length may be the
// entire value of digest if uniqueness cannot be achieved without the
// full value. This function will attempt to make short codes as short
// as possible to be unique.
func ShortCodeTable(dst *Set, length int) map[digest.Digest]string {
	dst.mutex.RLock()
	defer dst.mutex.RUnlock()
	m := make(map[digest.Digest]string, len(dst.entries))
	l := length
	// resetIdx tracks the last entry known to collide with the current
	// prefix length; entries are sorted, so collisions are contiguous.
	resetIdx := 0
	for i := 0; i < len(dst.entries); i++ {
		var short string
		extended := true
		for extended {
			extended = false
			if len(dst.entries[i].val) <= l {
				// Candidate length covers the whole hex value; fall back
				// to the full digest string.
				short = dst.entries[i].digest.String()
			} else {
				short = dst.entries[i].val[:l]
				// Scan following entries for prefix collisions; stop at
				// the first non-colliding one (sorted order).
				for j := i + 1; j < len(dst.entries); j++ {
					if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) {
						if j > resetIdx {
							resetIdx = j
						}
						extended = true
					} else {
						break
					}
				}
				if extended {
					// Collision found: grow the prefix and retry.
					l++
				}
			}
		}
		m[dst.entries[i].digest] = short
		if i >= resetIdx {
			// Past the last colliding entry: reset to the minimum length.
			l = length
		}
	}
	return m
}
||||
|
||||
type digestEntry struct { |
||||
alg digest.Algorithm |
||||
val string |
||||
digest digest.Digest |
||||
} |
||||
|
||||
type digestEntries []*digestEntry |
||||
|
||||
func (d digestEntries) Len() int { |
||||
return len(d) |
||||
} |
||||
|
||||
func (d digestEntries) Less(i, j int) bool { |
||||
if d[i].val != d[j].val { |
||||
return d[i].val < d[j].val |
||||
} |
||||
return d[i].alg < d[j].alg |
||||
} |
||||
|
||||
func (d digestEntries) Swap(i, j int) { |
||||
d[i], d[j] = d[j], d[i] |
||||
} |
@ -0,0 +1,42 @@ |
||||
package reference |
||||
|
||||
import "path" |
||||
|
||||
// IsNameOnly returns true if reference only contains a repo name.
|
||||
func IsNameOnly(ref Named) bool { |
||||
if _, ok := ref.(NamedTagged); ok { |
||||
return false |
||||
} |
||||
if _, ok := ref.(Canonical); ok { |
||||
return false |
||||
} |
||||
return true |
||||
} |
||||
|
||||
// FamiliarName returns the familiar name string
|
||||
// for the given named, familiarizing if needed.
|
||||
func FamiliarName(ref Named) string { |
||||
if nn, ok := ref.(normalizedNamed); ok { |
||||
return nn.Familiar().Name() |
||||
} |
||||
return ref.Name() |
||||
} |
||||
|
||||
// FamiliarString returns the familiar string representation
|
||||
// for the given reference, familiarizing if needed.
|
||||
func FamiliarString(ref Reference) string { |
||||
if nn, ok := ref.(normalizedNamed); ok { |
||||
return nn.Familiar().String() |
||||
} |
||||
return ref.String() |
||||
} |
||||
|
||||
// FamiliarMatch reports whether ref matches the specified pattern.
// See https://godoc.org/path#Match for supported patterns.
func FamiliarMatch(pattern string, ref Reference) (bool, error) {
	matched, err := path.Match(pattern, FamiliarString(ref))
	// Fall back to matching on the bare name (no tag/digest) for named
	// references. NOTE(review): err from the first Match is returned even
	// when the fallback matches; path.Match only errors on a malformed
	// pattern, so both calls fail or succeed together.
	if namedRef, isNamed := ref.(Named); isNamed && !matched {
		matched, _ = path.Match(pattern, FamiliarName(namedRef))
	}
	return matched, err
}
@ -0,0 +1,170 @@ |
||||
package reference |
||||
|
||||
import ( |
||||
"errors" |
||||
"fmt" |
||||
"strings" |
||||
|
||||
"github.com/docker/distribution/digestset" |
||||
"github.com/opencontainers/go-digest" |
||||
) |
||||
|
||||
var (
	legacyDefaultDomain = "index.docker.io" // historical default registry host, rewritten to defaultDomain
	defaultDomain       = "docker.io"       // registry assumed when a name carries no domain
	officialRepoName    = "library"         // namespace for official images on the default registry
	defaultTag          = "latest"          // tag applied by TagNameOnly when none is given
)
||||
|
||||
// normalizedNamed represents a name which has been
// normalized and has a familiar form. A familiar name
// is what is used in Docker UI. An example normalized
// name is "docker.io/library/ubuntu" and corresponding
// familiar name of "ubuntu".
type normalizedNamed interface {
	Named
	// Familiar returns the shortened, UI-friendly form of the reference.
	Familiar() Named
}
||||
|
||||
// ParseNormalizedNamed parses a string into a named reference
// transforming a familiar name from Docker UI to a fully
// qualified reference. If the value may be an identifier
// use ParseAnyReference.
func ParseNormalizedNamed(s string) (Named, error) {
	if ok := anchoredIdentifierRegexp.MatchString(s); ok {
		// A bare 64-hex-char string is an image ID, not a repository name.
		return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
	}
	domain, remainder := splitDockerDomain(s)
	var remoteName string
	if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
		// Strip the tag so only the repository part is case-checked.
		remoteName = remainder[:tagSep]
	} else {
		remoteName = remainder
	}
	if strings.ToLower(remoteName) != remoteName {
		return nil, errors.New("invalid reference format: repository name must be lowercase")
	}

	// Re-parse the fully-qualified form against the canonical grammar.
	ref, err := Parse(domain + "/" + remainder)
	if err != nil {
		return nil, err
	}
	named, isNamed := ref.(Named)
	if !isNamed {
		return nil, fmt.Errorf("reference %s has no name", ref.String())
	}
	return named, nil
}
||||
|
||||
// splitDockerDomain splits a repository name to domain and remotename string.
|
||||
// If no valid domain is found, the default domain is used. Repository name
|
||||
// needs to be already validated before.
|
||||
func splitDockerDomain(name string) (domain, remainder string) { |
||||
i := strings.IndexRune(name, '/') |
||||
if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") { |
||||
domain, remainder = defaultDomain, name |
||||
} else { |
||||
domain, remainder = name[:i], name[i+1:] |
||||
} |
||||
if domain == legacyDefaultDomain { |
||||
domain = defaultDomain |
||||
} |
||||
if domain == defaultDomain && !strings.ContainsRune(remainder, '/') { |
||||
remainder = officialRepoName + "/" + remainder |
||||
} |
||||
return |
||||
} |
||||
|
||||
// familiarizeName returns a shortened version of the name familiar
|
||||
// to to the Docker UI. Familiar names have the default domain
|
||||
// "docker.io" and "library/" repository prefix removed.
|
||||
// For example, "docker.io/library/redis" will have the familiar
|
||||
// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
|
||||
// Returns a familiarized named only reference.
|
||||
func familiarizeName(named namedRepository) repository { |
||||
repo := repository{ |
||||
domain: named.Domain(), |
||||
path: named.Path(), |
||||
} |
||||
|
||||
if repo.domain == defaultDomain { |
||||
repo.domain = "" |
||||
// Handle official repositories which have the pattern "library/<official repo name>"
|
||||
if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName { |
||||
repo.path = split[1] |
||||
} |
||||
} |
||||
return repo |
||||
} |
||||
|
||||
// Familiar returns the familiar form of a full reference, shortening the
// repository while preserving any tag and digest.
func (r reference) Familiar() Named {
	return reference{
		namedRepository: familiarizeName(r.namedRepository),
		tag:             r.tag,
		digest:          r.digest,
	}
}

// Familiar returns the familiar form of a bare repository reference.
func (r repository) Familiar() Named {
	return familiarizeName(r)
}

// Familiar returns the familiar form of a tagged reference, shortening the
// repository while preserving the tag.
func (t taggedReference) Familiar() Named {
	return taggedReference{
		namedRepository: familiarizeName(t.namedRepository),
		tag:             t.tag,
	}
}

// Familiar returns the familiar form of a canonical (digested) reference,
// shortening the repository while preserving the digest.
func (c canonicalReference) Familiar() Named {
	return canonicalReference{
		namedRepository: familiarizeName(c.namedRepository),
		digest:          c.digest,
	}
}
||||
|
||||
// TagNameOnly adds the default tag "latest" to a reference if it only has
|
||||
// a repo name.
|
||||
func TagNameOnly(ref Named) Named { |
||||
if IsNameOnly(ref) { |
||||
namedTagged, err := WithTag(ref, defaultTag) |
||||
if err != nil { |
||||
// Default tag must be valid, to create a NamedTagged
|
||||
// type with non-validated input the WithTag function
|
||||
// should be used instead
|
||||
panic(err) |
||||
} |
||||
return namedTagged |
||||
} |
||||
return ref |
||||
} |
||||
|
||||
// ParseAnyReference parses a reference string as a possible identifier,
|
||||
// full digest, or familiar name.
|
||||
func ParseAnyReference(ref string) (Reference, error) { |
||||
if ok := anchoredIdentifierRegexp.MatchString(ref); ok { |
||||
return digestReference("sha256:" + ref), nil |
||||
} |
||||
if dgst, err := digest.Parse(ref); err == nil { |
||||
return digestReference(dgst), nil |
||||
} |
||||
|
||||
return ParseNormalizedNamed(ref) |
||||
} |
||||
|
||||
// ParseAnyReferenceWithSet parses a reference string as a possible short
|
||||
// identifier to be matched in a digest set, a full digest, or familiar name.
|
||||
func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) { |
||||
if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok { |
||||
dgst, err := ds.Lookup(ref) |
||||
if err == nil { |
||||
return digestReference(dgst), nil |
||||
} |
||||
} else { |
||||
if dgst, err := digest.Parse(ref); err == nil { |
||||
return digestReference(dgst), nil |
||||
} |
||||
} |
||||
|
||||
return ParseNormalizedNamed(ref) |
||||
} |
@ -0,0 +1,433 @@ |
||||
// Package reference provides a general type to represent any way of referencing images within the registry.
|
||||
// Its main purpose is to abstract tags and digests (content-addressable hash).
|
||||
//
|
||||
// Grammar
|
||||
//
|
||||
// reference := name [ ":" tag ] [ "@" digest ]
|
||||
// name := [domain '/'] path-component ['/' path-component]*
|
||||
// domain := domain-component ['.' domain-component]* [':' port-number]
|
||||
// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
|
||||
// port-number := /[0-9]+/
|
||||
// path-component := alpha-numeric [separator alpha-numeric]*
|
||||
// alpha-numeric := /[a-z0-9]+/
|
||||
// separator := /[_.]|__|[-]*/
|
||||
//
|
||||
// tag := /[\w][\w.-]{0,127}/
|
||||
//
|
||||
// digest := digest-algorithm ":" digest-hex
|
||||
// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]*
|
||||
// digest-algorithm-separator := /[+.-_]/
|
||||
// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/
|
||||
// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
|
||||
//
|
||||
// identifier := /[a-f0-9]{64}/
|
||||
// short-identifier := /[a-f0-9]{6,64}/
|
||||
package reference |
||||
|
||||
import ( |
||||
"errors" |
||||
"fmt" |
||||
"strings" |
||||
|
||||
"github.com/opencontainers/go-digest" |
||||
) |
||||
|
||||
const (
	// NameTotalLengthMax is the maximum total number of characters in a repository name.
	// Exceeding it yields ErrNameTooLong (see the error variables below).
	NameTotalLengthMax = 255
)
||||
|
||||
var (
	// ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
	ErrReferenceInvalidFormat = errors.New("invalid reference format")

	// ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
	ErrTagInvalidFormat = errors.New("invalid tag format")

	// ErrDigestInvalidFormat represents an error while trying to parse a string as a digest.
	ErrDigestInvalidFormat = errors.New("invalid digest format")

	// ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
	ErrNameContainsUppercase = errors.New("repository name must be lowercase")

	// ErrNameEmpty is returned for empty, invalid repository names.
	ErrNameEmpty = errors.New("repository name must have at least one component")

	// ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
	ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)

	// ErrNameNotCanonical is returned when a name is not canonical.
	ErrNameNotCanonical = errors.New("repository name must be canonical")
)
||||
|
||||
// Reference is an opaque object reference identifier that may include
// modifiers such as a hostname, name, tag, and digest.
type Reference interface {
	// String returns the full reference
	String() string
}
||||
|
||||
// Field provides a wrapper type for resolving correct reference types when
// working with encoding.
type Field struct {
	reference Reference // the wrapped reference; set via AsField or UnmarshalText
}
||||
|
||||
// AsField wraps a reference in a Field for encoding.
|
||||
func AsField(reference Reference) Field { |
||||
return Field{reference} |
||||
} |
||||
|
||||
// Reference unwraps the reference type from the field to
// return the Reference object. This object should be
// of the appropriate type to further check for different
// reference types.
func (f Field) Reference() Reference {
	// Plain accessor: returns whatever AsField or UnmarshalText stored.
	return f.reference
}
||||
|
||||
// MarshalText serializes the field to byte text which
// is the string of the reference.
func (f Field) MarshalText() (p []byte, err error) {
	// NOTE(review): a zero Field holds a nil reference, so String() would
	// panic here — confirm callers always marshal a populated Field.
	return []byte(f.reference.String()), nil
}
||||
|
||||
// UnmarshalText parses text bytes by invoking the
|
||||
// reference parser to ensure the appropriately
|
||||
// typed reference object is wrapped by field.
|
||||
func (f *Field) UnmarshalText(p []byte) error { |
||||
r, err := Parse(string(p)) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
|
||||
f.reference = r |
||||
return nil |
||||
} |
||||
|
||||
// Named is an object with a full name
type Named interface {
	Reference
	// Name returns the fully-qualified repository name.
	Name() string
}

// Tagged is an object which has a tag
type Tagged interface {
	Reference
	// Tag returns the tag component.
	Tag() string
}

// NamedTagged is an object including a name and tag.
type NamedTagged interface {
	Named
	// Tag returns the tag component.
	Tag() string
}

// Digested is an object which has a digest
// in which it can be referenced by
type Digested interface {
	Reference
	// Digest returns the digest component.
	Digest() digest.Digest
}

// Canonical reference is an object with a fully unique
// name including a name with domain and digest
type Canonical interface {
	Named
	// Digest returns the digest component.
	Digest() digest.Digest
}

// namedRepository is a reference to a repository with a name.
// A namedRepository has both domain and path components.
type namedRepository interface {
	Named
	// Domain returns the registry domain component.
	Domain() string
	// Path returns the repository path within the registry.
	Path() string
}
||||
|
||||
// Domain returns the domain part of the Named reference.
func Domain(named Named) string {
	// Fast path: the concrete type already carries the split components.
	if r, ok := named.(namedRepository); ok {
		return r.Domain()
	}
	domain, _ := splitDomain(named.Name())
	return domain
}

// Path returns the name without the domain part of the Named reference.
func Path(named Named) (name string) {
	// Fast path: the concrete type already carries the split components.
	if r, ok := named.(namedRepository); ok {
		return r.Path()
	}
	_, path := splitDomain(named.Name())
	return path
}

// splitDomain splits a repository name into its domain and remainder
// (path) components. When the name does not match the anchored name
// pattern, the domain is empty and the full input is returned as the path.
func splitDomain(name string) (string, string) {
	match := anchoredNameRegexp.FindStringSubmatch(name)
	if len(match) != 3 {
		return "", name
	}
	return match[1], match[2]
}
||||
|
||||
// SplitHostname splits a named reference into a
// hostname and name string. If no valid hostname is
// found, the hostname is empty and the full value
// is returned as name.
//
// Deprecated: Use Domain or Path.
func SplitHostname(named Named) (string, string) {
	if r, ok := named.(namedRepository); ok {
		return r.Domain(), r.Path()
	}
	return splitDomain(named.Name())
}
||||
|
||||
// Parse parses s and returns a syntactically valid Reference.
// If an error was encountered it is returned, along with a nil Reference.
// NOTE: Parse will not handle short digests.
func Parse(s string) (Reference, error) {
	matches := ReferenceRegexp.FindStringSubmatch(s)
	if matches == nil {
		if s == "" {
			return nil, ErrNameEmpty
		}
		// Distinguish "name contains uppercase" from generally malformed
		// input by retrying the match against the lowercased string.
		if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil {
			return nil, ErrNameContainsUppercase
		}
		return nil, ErrReferenceInvalidFormat
	}

	// matches[1] is the name component captured by ReferenceRegexp.
	if len(matches[1]) > NameTotalLengthMax {
		return nil, ErrNameTooLong
	}

	var repo repository

	// Split the name into domain and path; on no match the whole name
	// is treated as a path with no domain.
	nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1])
	if nameMatch != nil && len(nameMatch) == 3 {
		repo.domain = nameMatch[1]
		repo.path = nameMatch[2]
	} else {
		repo.domain = ""
		repo.path = matches[1]
	}

	ref := reference{
		namedRepository: repo,
		tag:             matches[2],
	}
	// matches[3] is the (optional) digest component.
	if matches[3] != "" {
		var err error
		ref.digest, err = digest.Parse(matches[3])
		if err != nil {
			return nil, err
		}
	}

	// Narrow to the most specific reference type the components allow.
	r := getBestReferenceType(ref)
	if r == nil {
		return nil, ErrNameEmpty
	}

	return r, nil
}
||||
|
||||
// ParseNamed parses s and returns a syntactically valid reference implementing
// the Named interface. The reference must have a name and be in the canonical
// form, otherwise an error is returned.
// If an error was encountered it is returned, along with a nil Reference.
// NOTE: ParseNamed will not handle short digests.
func ParseNamed(s string) (Named, error) {
	named, err := ParseNormalizedNamed(s)
	if err != nil {
		return nil, err
	}
	// Round-trip check: the normalized form must equal the input,
	// otherwise the input was not canonical.
	if named.String() != s {
		return nil, ErrNameNotCanonical
	}
	return named, nil
}
||||
|
||||
// WithName returns a named object representing the given string. If the input
// is invalid ErrReferenceInvalidFormat will be returned.
func WithName(name string) (Named, error) {
	if len(name) > NameTotalLengthMax {
		return nil, ErrNameTooLong
	}

	// anchoredNameRegexp captures the domain and path components.
	match := anchoredNameRegexp.FindStringSubmatch(name)
	if match == nil || len(match) != 3 {
		return nil, ErrReferenceInvalidFormat
	}
	return repository{
		domain: match[1],
		path:   match[2],
	}, nil
}
||||
|
||||
// WithTag combines the name from "name" and the tag from "tag" to form a
// reference incorporating both the name and the tag.
func WithTag(name Named, tag string) (NamedTagged, error) {
	if !anchoredTagRegexp.MatchString(tag) {
		return nil, ErrTagInvalidFormat
	}
	// Carry over the domain/path split when the input exposes it;
	// otherwise treat the full name as the path.
	var repo repository
	if r, ok := name.(namedRepository); ok {
		repo.domain = r.Domain()
		repo.path = r.Path()
	} else {
		repo.path = name.Name()
	}
	// Preserve an existing digest by returning the full reference type.
	if canonical, ok := name.(Canonical); ok {
		return reference{
			namedRepository: repo,
			tag:             tag,
			digest:          canonical.Digest(),
		}, nil
	}
	return taggedReference{
		namedRepository: repo,
		tag:             tag,
	}, nil
}
||||
|
||||
// WithDigest combines the name from "name" and the digest from "digest" to form
// a reference incorporating both the name and the digest.
func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
	if !anchoredDigestRegexp.MatchString(digest.String()) {
		return nil, ErrDigestInvalidFormat
	}
	// Carry over the domain/path split when the input exposes it;
	// otherwise treat the full name as the path.
	var repo repository
	if r, ok := name.(namedRepository); ok {
		repo.domain = r.Domain()
		repo.path = r.Path()
	} else {
		repo.path = name.Name()
	}
	// Preserve an existing tag by returning the full reference type.
	if tagged, ok := name.(Tagged); ok {
		return reference{
			namedRepository: repo,
			tag:             tagged.Tag(),
			digest:          digest,
		}, nil
	}
	return canonicalReference{
		namedRepository: repo,
		digest:          digest,
	}, nil
}
||||
|
||||
// TrimNamed removes any tag or digest from the named reference,
// returning a bare repository (domain + path) reference.
func TrimNamed(ref Named) Named {
	domain, path := SplitHostname(ref)
	return repository{
		domain: domain,
		path:   path,
	}
}
||||
|
||||
// getBestReferenceType narrows the fully populated reference to the most
// specific type its components allow: digest-only, name-only (repository),
// name+digest (canonical), name+tag (tagged), or the full reference when
// name, tag, and digest are all present. Returns nil when the reference
// has neither a name nor a digest.
func getBestReferenceType(ref reference) Reference {
	if ref.Name() == "" {
		// Allow digest only references
		if ref.digest != "" {
			return digestReference(ref.digest)
		}
		return nil
	}
	if ref.tag == "" {
		if ref.digest != "" {
			return canonicalReference{
				namedRepository: ref.namedRepository,
				digest:          ref.digest,
			}
		}
		return ref.namedRepository
	}
	if ref.digest == "" {
		return taggedReference{
			namedRepository: ref.namedRepository,
			tag:             ref.tag,
		}
	}

	// Name, tag, and digest all present: keep the full reference.
	return ref
}
||||
|
||||
// reference is the full reference type: a named repository together with
// both a tag and a digest.
type reference struct {
	namedRepository
	tag    string
	digest digest.Digest
}

// String renders the reference as name:tag@digest.
func (r reference) String() string {
	return r.Name() + ":" + r.tag + "@" + r.digest.String()
}

// Tag returns the tag component.
func (r reference) Tag() string {
	return r.tag
}

// Digest returns the digest component.
func (r reference) Digest() digest.Digest {
	return r.digest
}
||||
|
||||
// repository is the concrete named-repository reference, holding the
// domain and path components separately.
type repository struct {
	domain string
	path   string
}

// String returns the full repository name, satisfying Reference.
func (repo repository) String() string {
	return repo.Name()
}

// Name joins the domain and path with a "/"; a repository without a
// domain is represented by its path alone.
func (repo repository) Name() string {
	if repo.domain != "" {
		return repo.domain + "/" + repo.path
	}
	return repo.path
}

// Domain returns the domain component of the repository.
func (repo repository) Domain() string {
	return repo.domain
}

// Path returns the path component of the repository.
func (repo repository) Path() string {
	return repo.path
}
||||
|
||||
// digestReference is a digest-only reference with no name component.
type digestReference digest.Digest

// String returns the digest string, satisfying Reference.
func (d digestReference) String() string {
	return digest.Digest(d).String()
}

// Digest returns the underlying digest, satisfying Digested.
func (d digestReference) Digest() digest.Digest {
	return digest.Digest(d)
}
||||
|
||||
// taggedReference is a named repository reference with a tag but no digest.
type taggedReference struct {
	namedRepository
	tag string
}

// String renders the reference as name:tag.
func (t taggedReference) String() string {
	return t.Name() + ":" + t.tag
}

// Tag returns the tag component.
func (t taggedReference) Tag() string {
	return t.tag
}
||||
|
||||
// canonicalReference is a named repository reference with a digest but
// no tag.
type canonicalReference struct {
	namedRepository
	digest digest.Digest
}

// String renders the reference as name@digest.
func (c canonicalReference) String() string {
	return c.Name() + "@" + c.digest.String()
}

// Digest returns the digest component.
func (c canonicalReference) Digest() digest.Digest {
	return c.digest
}
@ -0,0 +1,143 @@ |
||||
package reference |
||||
|
||||
import "regexp" |
||||
|
||||
var (
	// alphaNumericRegexp defines the alpha numeric atom, typically a
	// component of names. This only allows lower case characters and digits.
	alphaNumericRegexp = match(`[a-z0-9]+`)

	// separatorRegexp defines the separators allowed to be embedded in name
	// components. This allows one period, one or two underscores, and
	// multiple dashes.
	separatorRegexp = match(`(?:[._]|__|[-]*)`)

	// nameComponentRegexp restricts registry path component names to start
	// with at least one letter or number, with following parts able to be
	// separated by one period, one or two underscores, and multiple dashes.
	nameComponentRegexp = expression(
		alphaNumericRegexp,
		optional(repeated(separatorRegexp, alphaNumericRegexp)))

	// domainComponentRegexp restricts the registry domain component of a
	// repository name to start with a component as defined by DomainRegexp
	// and followed by an optional port.
	domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)

	// DomainRegexp defines the structure of potential domain components
	// that may be part of image names. This is purposely a subset of what is
	// allowed by DNS to ensure backwards compatibility with Docker image
	// names.
	DomainRegexp = expression(
		domainComponentRegexp,
		optional(repeated(literal(`.`), domainComponentRegexp)),
		optional(literal(`:`), match(`[0-9]+`)))

	// TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
	TagRegexp = match(`[\w][\w.-]{0,127}`)

	// anchoredTagRegexp matches valid tag names, anchored at the start and
	// end of the matched string.
	anchoredTagRegexp = anchored(TagRegexp)

	// DigestRegexp matches valid digests.
	DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)

	// anchoredDigestRegexp matches valid digests, anchored at the start and
	// end of the matched string.
	anchoredDigestRegexp = anchored(DigestRegexp)

	// NameRegexp is the format for the name component of references. The
	// regexp has capturing groups for the domain and name part omitting
	// the separating forward slash from either.
	NameRegexp = expression(
		optional(DomainRegexp, literal(`/`)),
		nameComponentRegexp,
		optional(repeated(literal(`/`), nameComponentRegexp)))

	// anchoredNameRegexp is used to parse a name value, capturing the
	// domain and trailing components.
	anchoredNameRegexp = anchored(
		optional(capture(DomainRegexp), literal(`/`)),
		capture(nameComponentRegexp,
			optional(repeated(literal(`/`), nameComponentRegexp))))

	// ReferenceRegexp is the full supported format of a reference. The regexp
	// is anchored and has capturing groups for name, tag, and digest
	// components.
	ReferenceRegexp = anchored(capture(NameRegexp),
		optional(literal(":"), capture(TagRegexp)),
		optional(literal("@"), capture(DigestRegexp)))

	// IdentifierRegexp is the format for string identifier used as a
	// content addressable identifier using sha256. These identifiers
	// are like digests without the algorithm, since sha256 is used.
	IdentifierRegexp = match(`([a-f0-9]{64})`)

	// ShortIdentifierRegexp is the format used to represent a prefix
	// of an identifier. A prefix may be used to match a sha256 identifier
	// within a list of trusted identifiers.
	ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`)

	// anchoredIdentifierRegexp is used to check or match an
	// identifier value, anchored at start and end of string.
	anchoredIdentifierRegexp = anchored(IdentifierRegexp)

	// anchoredShortIdentifierRegexp is used to check if a value
	// is a possible identifier prefix, anchored at start and end
	// of string.
	anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp)
)
||||
|
||||
// match compiles the string to a regular expression.
var match = regexp.MustCompile

// literal compiles s into a literal regular expression, escaping any
// regexp reserved characters so the result matches s verbatim.
func literal(s string) *regexp.Regexp {
	re := match(regexp.QuoteMeta(s))

	// Sanity check: quoting must have produced a pure literal pattern.
	if _, complete := re.LiteralPrefix(); !complete {
		panic("must be a literal")
	}

	return re
}

// expression defines a full expression, where each regular expression must
// follow the previous.
func expression(res ...*regexp.Regexp) *regexp.Regexp {
	combined := ""
	for i := range res {
		combined += res[i].String()
	}

	return match(combined)
}

// quantified wraps the expressions in a non-capturing group followed by
// the given quantifier.
func quantified(q string, res ...*regexp.Regexp) *regexp.Regexp {
	return match(group(expression(res...)).String() + q)
}

// optional wraps the expression in a non-capturing group and makes the
// production optional.
func optional(res ...*regexp.Regexp) *regexp.Regexp {
	return quantified(`?`, res...)
}

// repeated wraps the regexp in a non-capturing group to get one or more
// matches.
func repeated(res ...*regexp.Regexp) *regexp.Regexp {
	return quantified(`+`, res...)
}

// group wraps the regexp in a non-capturing group.
func group(res ...*regexp.Regexp) *regexp.Regexp {
	inner := expression(res...).String()
	return match(`(?:` + inner + `)`)
}

// capture wraps the expression in a capturing group.
func capture(res ...*regexp.Regexp) *regexp.Regexp {
	inner := expression(res...).String()
	return match(`(` + inner + `)`)
}

// anchored anchors the regular expression by adding start and end delimiters.
func anchored(res ...*regexp.Regexp) *regexp.Regexp {
	inner := expression(res...).String()
	return match(`^` + inner + `$`)
}
@ -0,0 +1,267 @@ |
||||
package errcode |
||||
|
||||
import ( |
||||
"encoding/json" |
||||
"fmt" |
||||
"strings" |
||||
) |
||||
|
||||
// ErrorCoder is the base interface for ErrorCode and Error allowing
// users of each to just call ErrorCode to get the real ID of each.
type ErrorCoder interface {
	ErrorCode() ErrorCode
}

// ErrorCode represents the error type. The errors are serialized via strings
// and the integer format may change and should *never* be exported.
type ErrorCode int

// Compile-time check that ErrorCode satisfies the error interface.
var _ error = ErrorCode(0)
||||
|
||||
// ErrorCode just returns itself, satisfying ErrorCoder.
func (ec ErrorCode) ErrorCode() ErrorCode {
	return ec
}

// Error returns the ID/Value as a lowercase, space-separated string.
func (ec ErrorCode) Error() string {
	// NOTE(stevvooe): Cannot use message here since it may have unpopulated args.
	return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1))
}

// Descriptor returns the descriptor for the error code, falling back to
// the descriptor of ErrorCodeUnknown for unregistered codes.
func (ec ErrorCode) Descriptor() ErrorDescriptor {
	d, ok := errorCodeToDescriptors[ec]

	if !ok {
		return ErrorCodeUnknown.Descriptor()
	}

	return d
}

// String returns the canonical identifier for this error code.
func (ec ErrorCode) String() string {
	return ec.Descriptor().Value
}

// Message returned the human-readable error message for this error code.
func (ec ErrorCode) Message() string {
	return ec.Descriptor().Message
}

// MarshalText encodes the receiver into UTF-8-encoded text and returns the
// result.
func (ec ErrorCode) MarshalText() (text []byte, err error) {
	return []byte(ec.String()), nil
}

// UnmarshalText decodes the form generated by MarshalText. Unknown
// identifiers decode to ErrorCodeUnknown rather than failing.
func (ec *ErrorCode) UnmarshalText(text []byte) error {
	desc, ok := idToDescriptors[string(text)]

	if !ok {
		desc = ErrorCodeUnknown.Descriptor()
	}

	*ec = desc.Code

	return nil
}

// WithMessage creates a new Error struct based on the passed-in info and
// overrides the Message property.
func (ec ErrorCode) WithMessage(message string) Error {
	return Error{
		Code:    ec,
		Message: message,
	}
}

// WithDetail creates a new Error struct based on the passed-in info and
// set the Detail property appropriately.
func (ec ErrorCode) WithDetail(detail interface{}) Error {
	return Error{
		Code:    ec,
		Message: ec.Message(),
	}.WithDetail(detail)
}

// WithArgs creates a new Error struct and sets the Args slice.
func (ec ErrorCode) WithArgs(args ...interface{}) Error {
	return Error{
		Code:    ec,
		Message: ec.Message(),
	}.WithArgs(args...)
}
||||
|
||||
// Error provides a wrapper around ErrorCode with extra Details provided.
type Error struct {
	Code    ErrorCode   `json:"code"`
	Message string      `json:"message"`
	Detail  interface{} `json:"detail,omitempty"`

	// TODO(duglin): See if we need an "args" property so we can do the
	// variable substitution right before showing the message to the user
}

// Compile-time check that Error satisfies the error interface.
var _ error = Error{}

// ErrorCode returns the ID/Value of this Error.
func (e Error) ErrorCode() ErrorCode {
	return e.Code
}

// Error returns a human readable representation of the error.
func (e Error) Error() string {
	return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message)
}

// WithDetail will return a new Error, based on the current one, but with
// some Detail info added.
func (e Error) WithDetail(detail interface{}) Error {
	return Error{
		Code:    e.Code,
		Message: e.Message,
		Detail:  detail,
	}
}

// WithArgs uses the passed-in list of interface{} as the substitution
// variables in the Error's Message string, but returns a new Error.
func (e Error) WithArgs(args ...interface{}) Error {
	return Error{
		Code:    e.Code,
		Message: fmt.Sprintf(e.Code.Message(), args...),
		Detail:  e.Detail,
	}
}
||||
|
||||
// ErrorDescriptor provides relevant information about a given error code.
type ErrorDescriptor struct {
	// Code is the error code that this descriptor describes.
	Code ErrorCode

	// Value provides a unique, string key, often capitalized with
	// underscores, to identify the error code. This value is used as the
	// keyed value when serializing api errors.
	Value string

	// Message is a short, human readable description of the error condition
	// included in API responses.
	Message string

	// Description provides a complete account of the errors purpose, suitable
	// for use in documentation.
	Description string

	// HTTPStatusCode provides the http status code that is associated with
	// this error condition.
	HTTPStatusCode int
}
||||
|
||||
// ParseErrorCode returns the value by the string error code.
// `ErrorCodeUnknown` will be returned if the error is not known.
func ParseErrorCode(value string) ErrorCode {
	ed, ok := idToDescriptors[value]
	if ok {
		return ed.Code
	}

	return ErrorCodeUnknown
}
||||
|
||||
// Errors provides the envelope for multiple errors and a few sugar methods
// for use within the application.
type Errors []error

var _ error = Errors{}

// Error renders the collection as a single human-readable string:
// "<nil>" when empty, the sole error's message for one element, and a
// newline-separated list otherwise.
func (errs Errors) Error() string {
	if len(errs) == 0 {
		return "<nil>"
	}
	if len(errs) == 1 {
		return errs[0].Error()
	}
	out := "errors:\n"
	for _, e := range errs {
		out += e.Error() + "\n"
	}
	return out
}

// Len returns the current number of errors.
func (errs Errors) Len() int {
	return len(errs)
}
||||
|
||||
// MarshalJSON converts slice of error, ErrorCode or Error into a
|
||||
// slice of Error - then serializes
|
||||
func (errs Errors) MarshalJSON() ([]byte, error) { |
||||
var tmpErrs struct { |
||||
Errors []Error `json:"errors,omitempty"` |
||||
} |
||||
|
||||
for _, daErr := range errs { |
||||
var err Error |
||||
|
||||
switch daErr.(type) { |
||||
case ErrorCode: |
||||
err = daErr.(ErrorCode).WithDetail(nil) |
||||
case Error: |
||||
err = daErr.(Error) |
||||
default: |
||||
err = ErrorCodeUnknown.WithDetail(daErr) |
||||
|
||||
} |
||||
|
||||
// If the Error struct was setup and they forgot to set the
|
||||
// Message field (meaning its "") then grab it from the ErrCode
|
||||
msg := err.Message |
||||
if msg == "" { |
||||
msg = err.Code.Message() |
||||
} |
||||
|
||||
tmpErrs.Errors = append(tmpErrs.Errors, Error{ |
||||
Code: err.Code, |
||||
Message: msg, |
||||
Detail: err.Detail, |
||||
}) |
||||
} |
||||
|
||||
return json.Marshal(tmpErrs) |
||||
} |
||||
|
||||
// UnmarshalJSON deserializes []Error and then converts it into slice of
// Error or ErrorCode.
func (errs *Errors) UnmarshalJSON(data []byte) error {
	var tmpErrs struct {
		Errors []Error
	}

	if err := json.Unmarshal(data, &tmpErrs); err != nil {
		return err
	}

	var newErrs Errors
	for _, daErr := range tmpErrs.Errors {
		// If Message is empty or exactly matches the Code's message string
		// then just use the Code, no need for a full Error struct
		if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) {
			// Error's w/o details get converted to ErrorCode
			newErrs = append(newErrs, daErr.Code)
		} else {
			// Error's w/ details are untouched
			newErrs = append(newErrs, Error{
				Code:    daErr.Code,
				Message: daErr.Message,
				Detail:  daErr.Detail,
			})
		}
	}

	*errs = newErrs
	return nil
}
@ -0,0 +1,40 @@ |
||||
package errcode |
||||
|
||||
import ( |
||||
"encoding/json" |
||||
"net/http" |
||||
) |
||||
|
||||
// ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err
// and sets the content-type header to 'application/json'. It will handle
// ErrorCoder and Errors, and if necessary will create an envelope.
func ServeJSON(w http.ResponseWriter, err error) error {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	var sc int

	switch errs := err.(type) {
	case Errors:
		if len(errs) < 1 {
			break
		}

		// Use the first error's status code for the whole response.
		if err, ok := errs[0].(ErrorCoder); ok {
			sc = err.ErrorCode().Descriptor().HTTPStatusCode
		}
	case ErrorCoder:
		sc = errs.ErrorCode().Descriptor().HTTPStatusCode
		err = Errors{err} // create an envelope.
	default:
		// We just have an unhandled error type, so just place in an envelope
		// and move along.
		err = Errors{err}
	}

	// Fall back to 500 when no status code could be determined.
	if sc == 0 {
		sc = http.StatusInternalServerError
	}

	w.WriteHeader(sc)

	return json.NewEncoder(w).Encode(err)
}
@ -0,0 +1,138 @@ |
||||
package errcode |
||||
|
||||
import ( |
||||
"fmt" |
||||
"net/http" |
||||
"sort" |
||||
"sync" |
||||
) |
||||
|
||||
// Registries populated by Register: lookup of descriptors by code, by
// string identifier, and by group name.
var (
	errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{}
	idToDescriptors        = map[string]ErrorDescriptor{}
	groupToDescriptors     = map[string][]ErrorDescriptor{}
)
||||
|
||||
var (
	// ErrorCodeUnknown is a generic error that can be used as a last
	// resort if there is no situation-specific error message that can be used
	ErrorCodeUnknown = Register("errcode", ErrorDescriptor{
		Value:   "UNKNOWN",
		Message: "unknown error",
		Description: `Generic error returned when the error does not have an
			API classification.`,
		HTTPStatusCode: http.StatusInternalServerError,
	})

	// ErrorCodeUnsupported is returned when an operation is not supported.
	ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{
		Value:   "UNSUPPORTED",
		Message: "The operation is unsupported.",
		Description: `The operation was unsupported due to a missing
		implementation or invalid set of parameters.`,
		HTTPStatusCode: http.StatusMethodNotAllowed,
	})

	// ErrorCodeUnauthorized is returned if a request requires
	// authentication.
	ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{
		Value:   "UNAUTHORIZED",
		Message: "authentication required",
		Description: `The access controller was unable to authenticate
		the client. Often this will be accompanied by a
		Www-Authenticate HTTP response header indicating how to
		authenticate.`,
		HTTPStatusCode: http.StatusUnauthorized,
	})

	// ErrorCodeDenied is returned if a client does not have sufficient
	// permission to perform an action.
	ErrorCodeDenied = Register("errcode", ErrorDescriptor{
		Value:   "DENIED",
		Message: "requested access to the resource is denied",
		Description: `The access controller denied access for the
		operation on a resource.`,
		HTTPStatusCode: http.StatusForbidden,
	})

	// ErrorCodeUnavailable provides a common error to report unavailability
	// of a service or endpoint.
	ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{
		Value:          "UNAVAILABLE",
		Message:        "service unavailable",
		Description:    "Returned when a service is not available",
		HTTPStatusCode: http.StatusServiceUnavailable,
	})

	// ErrorCodeTooManyRequests is returned if a client attempts too many
	// times to contact a service endpoint.
	ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{
		Value:   "TOOMANYREQUESTS",
		Message: "too many requests",
		Description: `Returned when a client attempts to contact a
		service too many times`,
		HTTPStatusCode: http.StatusTooManyRequests,
	})
)
||||
|
||||
// nextCode is the integer value handed to the next registered descriptor;
// registerLock serializes registrations.
var nextCode = 1000
var registerLock sync.Mutex

// Register will make the passed-in error known to the environment and
// return a new ErrorCode. It panics if either the descriptor's Value or
// the assigned code has already been registered.
func Register(group string, descriptor ErrorDescriptor) ErrorCode {
	registerLock.Lock()
	defer registerLock.Unlock()

	descriptor.Code = ErrorCode(nextCode)

	if _, ok := idToDescriptors[descriptor.Value]; ok {
		panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value))
	}
	if _, ok := errorCodeToDescriptors[descriptor.Code]; ok {
		panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code))
	}

	groupToDescriptors[group] = append(groupToDescriptors[group], descriptor)
	errorCodeToDescriptors[descriptor.Code] = descriptor
	idToDescriptors[descriptor.Value] = descriptor

	nextCode++
	return descriptor.Code
}
||||
|
||||
// byValue implements sort.Interface over ErrorDescriptor slices, ordering
// by the Value string.
type byValue []ErrorDescriptor

func (a byValue) Len() int           { return len(a) }
func (a byValue) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value }
||||
|
||||
// GetGroupNames returns the list of Error group names that are registered
|
||||
func GetGroupNames() []string { |
||||
keys := []string{} |
||||
|
||||
for k := range groupToDescriptors { |
||||
keys = append(keys, k) |
||||
} |
||||
sort.Strings(keys) |
||||
return keys |
||||
} |
||||
|
||||
// GetErrorCodeGroup returns the named group of error descriptors,
// sorted by Value. Returns nil for an unknown group name.
func GetErrorCodeGroup(name string) []ErrorDescriptor {
	desc := groupToDescriptors[name]
	sort.Sort(byValue(desc))
	return desc
}
||||
|
||||
// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are
// registered, irrespective of what group they're in, sorted by Value.
func GetErrorAllDescriptors() []ErrorDescriptor {
	result := []ErrorDescriptor{}

	for _, group := range GetGroupNames() {
		result = append(result, GetErrorCodeGroup(group)...)
	}
	sort.Sort(byValue(result))
	return result
}
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,191 @@ |
||||
|
||||
Apache License |
||||
Version 2.0, January 2004 |
||||
https://www.apache.org/licenses/ |
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION |
||||
|
||||
1. Definitions. |
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction, |
||||
and distribution as defined by Sections 1 through 9 of this document. |
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by |
||||
the copyright owner that is granting the License. |
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all |
||||
other entities that control, are controlled by, or are under common |
||||
control with that entity. For the purposes of this definition, |
||||
"control" means (i) the power, direct or indirect, to cause the |
||||
direction or management of such entity, whether by contract or |
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the |
||||
outstanding shares, or (iii) beneficial ownership of such entity. |
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity |
||||
exercising permissions granted by this License. |
||||
|
||||
"Source" form shall mean the preferred form for making modifications, |
||||
including but not limited to software source code, documentation |
||||
source, and configuration files. |
||||
|
||||
"Object" form shall mean any form resulting from mechanical |
||||
transformation or translation of a Source form, including but |
||||
not limited to compiled object code, generated documentation, |
||||
and conversions to other media types. |
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or |
||||
Object form, made available under the License, as indicated by a |
||||
copyright notice that is included in or attached to the work |
||||
(an example is provided in the Appendix below). |
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object |
||||
form, that is based on (or derived from) the Work and for which the |
||||
editorial revisions, annotations, elaborations, or other modifications |
||||
represent, as a whole, an original work of authorship. For the purposes |
||||
of this License, Derivative Works shall not include works that remain |
||||
separable from, or merely link (or bind by name) to the interfaces of, |
||||
the Work and Derivative Works thereof. |
||||
|
||||
"Contribution" shall mean any work of authorship, including |
||||
the original version of the Work and any modifications or additions |
||||
to that Work or Derivative Works thereof, that is intentionally |
||||
submitted to Licensor for inclusion in the Work by the copyright owner |
||||
or by an individual or Legal Entity authorized to submit on behalf of |
||||
the copyright owner. For the purposes of this definition, "submitted" |
||||
means any form of electronic, verbal, or written communication sent |
||||
to the Licensor or its representatives, including but not limited to |
||||
communication on electronic mailing lists, source code control systems, |
||||
and issue tracking systems that are managed by, or on behalf of, the |
||||
Licensor for the purpose of discussing and improving the Work, but |
||||
excluding communication that is conspicuously marked or otherwise |
||||
designated in writing by the copyright owner as "Not a Contribution." |
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity |
||||
on behalf of whom a Contribution has been received by Licensor and |
||||
subsequently incorporated within the Work. |
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of |
||||
this License, each Contributor hereby grants to You a perpetual, |
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
||||
copyright license to reproduce, prepare Derivative Works of, |
||||
publicly display, publicly perform, sublicense, and distribute the |
||||
Work and such Derivative Works in Source or Object form. |
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of |
||||
this License, each Contributor hereby grants to You a perpetual, |
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable |
||||
(except as stated in this section) patent license to make, have made, |
||||
use, offer to sell, sell, import, and otherwise transfer the Work, |
||||
where such license applies only to those patent claims licensable |
||||
by such Contributor that are necessarily infringed by their |
||||
Contribution(s) alone or by combination of their Contribution(s) |
||||
with the Work to which such Contribution(s) was submitted. If You |
||||
institute patent litigation against any entity (including a |
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work |
||||
or a Contribution incorporated within the Work constitutes direct |
||||
or contributory patent infringement, then any patent licenses |
||||
granted to You under this License for that Work shall terminate |
||||
as of the date such litigation is filed. |
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the |
||||
Work or Derivative Works thereof in any medium, with or without |
||||
modifications, and in Source or Object form, provided that You |
||||
meet the following conditions: |
||||
|
||||
(a) You must give any other recipients of the Work or |
||||
Derivative Works a copy of this License; and |
||||
|
||||
(b) You must cause any modified files to carry prominent notices |
||||
stating that You changed the files; and |
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works |
||||
that You distribute, all copyright, patent, trademark, and |
||||
attribution notices from the Source form of the Work, |
||||
excluding those notices that do not pertain to any part of |
||||
the Derivative Works; and |
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its |
||||
distribution, then any Derivative Works that You distribute must |
||||
include a readable copy of the attribution notices contained |
||||
within such NOTICE file, excluding those notices that do not |
||||
pertain to any part of the Derivative Works, in at least one |
||||
of the following places: within a NOTICE text file distributed |
||||
as part of the Derivative Works; within the Source form or |
||||
documentation, if provided along with the Derivative Works; or, |
||||
within a display generated by the Derivative Works, if and |
||||
wherever such third-party notices normally appear. The contents |
||||
of the NOTICE file are for informational purposes only and |
||||
do not modify the License. You may add Your own attribution |
||||
notices within Derivative Works that You distribute, alongside |
||||
or as an addendum to the NOTICE text from the Work, provided |
||||
that such additional attribution notices cannot be construed |
||||
as modifying the License. |
||||
|
||||
You may add Your own copyright statement to Your modifications and |
||||
may provide additional or different license terms and conditions |
||||
for use, reproduction, or distribution of Your modifications, or |
||||
for any such Derivative Works as a whole, provided Your use, |
||||
reproduction, and distribution of the Work otherwise complies with |
||||
the conditions stated in this License. |
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise, |
||||
any Contribution intentionally submitted for inclusion in the Work |
||||
by You to the Licensor shall be under the terms and conditions of |
||||
this License, without any additional terms or conditions. |
||||
Notwithstanding the above, nothing herein shall supersede or modify |
||||
the terms of any separate license agreement you may have executed |
||||
with Licensor regarding such Contributions. |
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade |
||||
names, trademarks, service marks, or product names of the Licensor, |
||||
except as required for reasonable and customary use in describing the |
||||
origin of the Work and reproducing the content of the NOTICE file. |
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or |
||||
agreed to in writing, Licensor provides the Work (and each |
||||
Contributor provides its Contributions) on an "AS IS" BASIS, |
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or |
||||
implied, including, without limitation, any warranties or conditions |
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A |
||||
PARTICULAR PURPOSE. You are solely responsible for determining the |
||||
appropriateness of using or redistributing the Work and assume any |
||||
risks associated with Your exercise of permissions under this License. |
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory, |
||||
whether in tort (including negligence), contract, or otherwise, |
||||
unless required by applicable law (such as deliberate and grossly |
||||
negligent acts) or agreed to in writing, shall any Contributor be |
||||
liable to You for damages, including any direct, indirect, special, |
||||
incidental, or consequential damages of any character arising as a |
||||
result of this License or out of the use or inability to use the |
||||
Work (including but not limited to damages for loss of goodwill, |
||||
work stoppage, computer failure or malfunction, or any and all |
||||
other commercial damages or losses), even if such Contributor |
||||
has been advised of the possibility of such damages. |
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing |
||||
the Work or Derivative Works thereof, You may choose to offer, |
||||
and charge a fee for, acceptance of support, warranty, indemnity, |
||||
or other liability obligations and/or rights consistent with this |
||||
License. However, in accepting such obligations, You may act only |
||||
on Your own behalf and on Your sole responsibility, not on behalf |
||||
of any other Contributor, and only if You agree to indemnify, |
||||
defend, and hold each Contributor harmless for any liability |
||||
incurred by, or claims asserted against, such Contributor by reason |
||||
of your accepting any such warranty or additional liability. |
||||
|
||||
END OF TERMS AND CONDITIONS |
||||
|
||||
Copyright 2013-2018 Docker, Inc. |
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); |
||||
you may not use this file except in compliance with the License. |
||||
You may obtain a copy of the License at |
||||
|
||||
https://www.apache.org/licenses/LICENSE-2.0 |
||||
|
||||
Unless required by applicable law or agreed to in writing, software |
||||
distributed under the License is distributed on an "AS IS" BASIS, |
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
||||
See the License for the specific language governing permissions and |
||||
limitations under the License. |
@ -0,0 +1,19 @@ |
||||
Docker |
||||
Copyright 2012-2017 Docker, Inc. |
||||
|
||||
This product includes software developed at Docker, Inc. (https://www.docker.com). |
||||
|
||||
This product contains software (https://github.com/creack/pty) developed |
||||
by Keith Rarick, licensed under the MIT License. |
||||
|
||||
The following is courtesy of our legal counsel: |
||||
|
||||
|
||||
Use and transfer of Docker may be subject to certain restrictions by the |
||||
United States and other governments. |
||||
It is your responsibility to ensure that your use and/or transfer does not |
||||
violate applicable laws. |
||||
|
||||
For more information, please see https://www.bis.doc.gov |
||||
|
||||
See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. |
@ -0,0 +1,42 @@ |
||||
# Working on the Engine API |
||||
|
||||
The Engine API is an HTTP API used by the command-line client to communicate with the daemon. It can also be used by third-party software to control the daemon. |
||||
|
||||
It consists of various components in this repository: |
||||
|
||||
- `api/swagger.yaml` A Swagger definition of the API. |
||||
- `api/types/` Types shared by both the client and server, representing various objects, options, responses, etc. Most are written manually, but some are automatically generated from the Swagger definition. See [#27919](https://github.com/docker/docker/issues/27919) for progress on this. |
||||
- `cli/` The command-line client. |
||||
- `client/` The Go client used by the command-line client. It can also be used by third-party Go programs. |
||||
- `daemon/` The daemon, which serves the API. |
||||
|
||||
## Swagger definition |
||||
|
||||
The API is defined by the [Swagger](http://swagger.io/specification/) definition in `api/swagger.yaml`. This definition can be used to: |
||||
|
||||
1. Automatically generate documentation. |
||||
2. Automatically generate the Go server and client. (A work-in-progress.) |
||||
3. Provide a machine readable version of the API for introspecting what it can do, automatically generating clients for other languages, etc. |
||||
|
||||
## Updating the API documentation |
||||
|
||||
The API documentation is generated entirely from `api/swagger.yaml`. If you make updates to the API, edit this file to represent the change in the documentation. |
||||
|
||||
The file is split into two main sections: |
||||
|
||||
- `definitions`, which defines re-usable objects used in requests and responses |
||||
- `paths`, which defines the API endpoints (and some inline objects which don't need to be reusable) |
||||
|
||||
To make an edit, first look for the endpoint you want to edit under `paths`, then make the required edits. Endpoints may reference reusable objects with `$ref`, which can be found in the `definitions` section. |
||||
|
||||
There is hopefully enough example material in the file for you to copy a similar pattern from elsewhere in the file (e.g. adding new fields or endpoints), but for the full reference, see the [Swagger specification](https://swagger.io/specification/v2/).
||||
|
||||
`swagger.yaml` is validated by `hack/validate/swagger` to ensure it is a valid Swagger definition. This is useful when making edits to ensure you are doing the right thing. |
||||
|
||||
## Viewing the API documentation |
||||
|
||||
When you make edits to `swagger.yaml`, you may want to check the generated API documentation to ensure it renders correctly. |
||||
|
||||
Run `make swagger-docs` and a preview will be running at `http://localhost`. Some of the styling may be incorrect, but you'll be able to ensure that it is generating the correct documentation. |
||||
|
||||
The production documentation is generated by vendoring `swagger.yaml` into [docker/docker.github.io](https://github.com/docker/docker.github.io). |
@ -0,0 +1,11 @@ |
||||
package api // import "github.com/docker/docker/api"

// Common constants for daemon and client.
const (
	// DefaultVersion is the version of the current REST API.
	DefaultVersion = "1.41"

	// NoBaseImageSpecifier is the symbol used by the FROM
	// command to specify that no base image is to be used.
	NoBaseImageSpecifier = "scratch"
)
@ -0,0 +1,6 @@ |
||||
// +build !windows

package api // import "github.com/docker/docker/api"

// MinVersion represents the minimum REST API version supported
// on non-Windows platforms.
const MinVersion = "1.12"
@ -0,0 +1,8 @@ |
||||
package api // import "github.com/docker/docker/api"

// MinVersion represents the minimum REST API version supported.
// Technically the first daemon API version released on Windows is v1.25 in
// engine version 1.13. However, some clients are explicitly using downlevel
// APIs (e.g. docker-compose v2.1 file format) and that is just too restrictive.
// Hence also allowing 1.24 on Windows.
const MinVersion string = "1.24"
@ -0,0 +1,12 @@ |
||||
|
||||
layout: |
||||
models: |
||||
- name: definition |
||||
source: asset:model |
||||
target: "{{ joinFilePath .Target .ModelPackage }}" |
||||
file_name: "{{ (snakize (pascalize .Name)) }}.go" |
||||
operations: |
||||
- name: handler |
||||
source: asset:serverOperation |
||||
target: "{{ joinFilePath .Target .APIPackage .Package }}" |
||||
file_name: "{{ (snakize (pascalize .Name)) }}.go" |
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,22 @@ |
||||
package types // import "github.com/docker/docker/api/types"

// AuthConfig contains authorization information for connecting to a Registry.
type AuthConfig struct {
	Username string `json:"username,omitempty"`
	Password string `json:"password,omitempty"`
	Auth     string `json:"auth,omitempty"`

	// Email is an optional value associated with the username.
	//
	// Deprecated: this field will be removed in a later version of docker.
	Email string `json:"email,omitempty"`

	ServerAddress string `json:"serveraddress,omitempty"`

	// IdentityToken is used to authenticate the user and get
	// an access token for the registry.
	IdentityToken string `json:"identitytoken,omitempty"`

	// RegistryToken is a bearer token to be sent to a registry.
	RegistryToken string `json:"registrytoken,omitempty"`
}
@ -0,0 +1,23 @@ |
||||
package blkiodev // import "github.com/docker/docker/api/types/blkiodev"
|
||||
|
||||
import "fmt" |
||||
|
||||
// WeightDevice is a structure that holds device:weight pair
type WeightDevice struct {
	Path   string
	Weight uint16
}

// String renders the device and its weight as "<path>:<weight>".
func (wd *WeightDevice) String() string {
	return fmt.Sprintf("%s:%d", wd.Path, wd.Weight)
}
||||
|
||||
// ThrottleDevice is a structure that holds device:rate_per_second pair
type ThrottleDevice struct {
	Path string
	Rate uint64
}

// String renders the device and its rate as "<path>:<rate>".
func (td *ThrottleDevice) String() string {
	return fmt.Sprintf("%s:%d", td.Path, td.Rate)
}
@ -0,0 +1,415 @@ |
||||
package types // import "github.com/docker/docker/api/types"
|
||||
|
||||
import ( |
||||
"bufio" |
||||
"io" |
||||
"net" |
||||
|
||||
"github.com/docker/docker/api/types/container" |
||||
"github.com/docker/docker/api/types/filters" |
||||
units "github.com/docker/go-units" |
||||
) |
||||
|
||||
// CheckpointCreateOptions holds parameters to create a checkpoint from a container.
type CheckpointCreateOptions struct {
	CheckpointID  string
	CheckpointDir string
	Exit          bool
}

// CheckpointListOptions holds parameters to list checkpoints for a container.
type CheckpointListOptions struct {
	CheckpointDir string
}

// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container.
type CheckpointDeleteOptions struct {
	CheckpointID  string
	CheckpointDir string
}

// ContainerAttachOptions holds parameters to attach to a container.
type ContainerAttachOptions struct {
	Stream     bool
	Stdin      bool
	Stdout     bool
	Stderr     bool
	DetachKeys string
	Logs       bool
}

// ContainerCommitOptions holds parameters to commit changes into a container.
type ContainerCommitOptions struct {
	Reference string
	Comment   string
	Author    string
	Changes   []string
	Pause     bool
	Config    *container.Config
}

// ContainerExecInspect holds information returned by exec inspect.
type ContainerExecInspect struct {
	ExecID      string
	ContainerID string
	Running     bool
	ExitCode    int
	Pid         int
}

// ContainerListOptions holds parameters to list containers with.
type ContainerListOptions struct {
	Quiet   bool
	Size    bool
	All     bool
	Latest  bool
	Since   string
	Before  string
	Limit   int
	Filters filters.Args
}

// ContainerLogsOptions holds parameters to filter logs with.
type ContainerLogsOptions struct {
	ShowStdout bool
	ShowStderr bool
	Since      string
	Until      string
	Timestamps bool
	Follow     bool
	Tail       string
	Details    bool
}

// ContainerRemoveOptions holds parameters to remove containers.
type ContainerRemoveOptions struct {
	RemoveVolumes bool
	RemoveLinks   bool
	Force         bool
}

// ContainerStartOptions holds parameters to start containers.
type ContainerStartOptions struct {
	CheckpointID  string
	CheckpointDir string
}

// CopyToContainerOptions holds information
// about files to copy into a container.
type CopyToContainerOptions struct {
	AllowOverwriteDirWithFile bool
	CopyUIDGID                bool
}

// EventsOptions holds parameters to filter events with.
type EventsOptions struct {
	Since   string
	Until   string
	Filters filters.Args
}

// NetworkListOptions holds parameters to filter the list of networks with.
type NetworkListOptions struct {
	Filters filters.Args
}
||||
|
||||
// HijackedResponse holds connection information for a hijacked request.
|
||||
type HijackedResponse struct { |
||||
Conn net.Conn |
||||
Reader *bufio.Reader |
||||
} |
||||
|
||||
// Close closes the hijacked connection and reader.
|
||||
func (h *HijackedResponse) Close() { |
||||
h.Conn.Close() |
||||
} |
||||
|
||||
// CloseWriter is an interface that implements structs
|
||||
// that close input streams to prevent from writing.
|
||||
type CloseWriter interface { |
||||
CloseWrite() error |
||||
} |
||||
|
||||
// CloseWrite closes a readWriter for writing.
|
||||
func (h *HijackedResponse) CloseWrite() error { |
||||
if conn, ok := h.Conn.(CloseWriter); ok { |
||||
return conn.CloseWrite() |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// ImageBuildOptions holds the information
// necessary to build images.
type ImageBuildOptions struct {
	Tags           []string
	SuppressOutput bool
	RemoteContext  string
	NoCache        bool
	Remove         bool
	ForceRemove    bool
	PullParent     bool
	Isolation      container.Isolation
	CPUSetCPUs     string
	CPUSetMems     string
	CPUShares      int64
	CPUQuota       int64
	CPUPeriod      int64
	Memory         int64
	MemorySwap     int64
	CgroupParent   string
	NetworkMode    string
	ShmSize        int64
	Dockerfile     string
	Ulimits        []*units.Ulimit
	// BuildArgs needs to be a *string instead of just a string so that
	// we can tell the difference between "" (empty string) and no value
	// at all (nil). See the parsing of buildArgs in
	// api/server/router/build/build_routes.go for even more info.
	BuildArgs   map[string]*string
	AuthConfigs map[string]AuthConfig
	Context     io.Reader
	Labels      map[string]string
	// Squash squashes the resulting image's layers to the parent: it
	// preserves the original image and creates a new one from the parent
	// with all the changes applied to a single layer.
	Squash bool
	// CacheFrom specifies images that are used for matching cache. Images
	// specified here do not need to have a valid parent chain to match cache.
	CacheFrom   []string
	SecurityOpt []string
	ExtraHosts  []string // List of extra hosts
	Target      string
	SessionID   string
	Platform    string
	// Version specifies the version of the underlying builder to use
	Version BuilderVersion
	// BuildID is an optional identifier that can be passed together with the
	// build request. The same identifier can be used to gracefully cancel the
	// build with the cancel request.
	BuildID string
	// Outputs defines configurations for exporting build results. Only supported
	// in BuildKit mode
	Outputs []ImageBuildOutput
}

// ImageBuildOutput defines configuration for exporting a build result.
type ImageBuildOutput struct {
	Type  string
	Attrs map[string]string
}

// BuilderVersion sets the version of underlying builder to use.
type BuilderVersion string

const (
	// BuilderV1 is the first generation builder in docker daemon
	BuilderV1 BuilderVersion = "1"
	// BuilderBuildKit is builder based on moby/buildkit project
	BuilderBuildKit = "2"
)

// ImageBuildResponse holds information
// returned by a server after building
// an image.
type ImageBuildResponse struct {
	Body   io.ReadCloser
	OSType string
}

// ImageCreateOptions holds information to create images.
type ImageCreateOptions struct {
	RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry.
	Platform     string // Platform is the target platform of the image if it needs to be pulled from the registry.
}

// ImageImportSource holds source information for ImageImport.
type ImageImportSource struct {
	Source     io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this.
	SourceName string    // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute.
}

// ImageImportOptions holds information to import images from the client host.
type ImageImportOptions struct {
	Tag      string   // Tag is the name to tag this image with. This attribute is deprecated.
	Message  string   // Message is the message to tag the image with
	Changes  []string // Changes are the raw changes to apply to this image
	Platform string   // Platform is the target platform of the image
}

// ImageListOptions holds parameters to filter the list of images with.
type ImageListOptions struct {
	All     bool
	Filters filters.Args
}

// ImageLoadResponse returns information to the client about a load process.
type ImageLoadResponse struct {
	// Body must be closed to avoid a resource leak
	Body io.ReadCloser
	JSON bool
}

// ImagePullOptions holds information to pull images.
type ImagePullOptions struct {
	All           bool
	RegistryAuth  string // RegistryAuth is the base64 encoded credentials for the registry
	PrivilegeFunc RequestPrivilegeFunc
	Platform      string
}

// RequestPrivilegeFunc is a function interface that
// clients can supply to retry operations after
// getting an authorization error.
// This function returns the registry authentication
// header value in base 64 format, or an error
// if the privilege request fails.
type RequestPrivilegeFunc func() (string, error)

// ImagePushOptions holds information to push images.
type ImagePushOptions ImagePullOptions

// ImageRemoveOptions holds parameters to remove images.
type ImageRemoveOptions struct {
	Force         bool
	PruneChildren bool
}

// ImageSearchOptions holds parameters to search images with.
type ImageSearchOptions struct {
	RegistryAuth  string
	PrivilegeFunc RequestPrivilegeFunc
	Filters       filters.Args
	Limit         int
}

// ResizeOptions holds parameters to resize a tty.
// It can be used to resize container ttys and
// exec process ttys too.
type ResizeOptions struct {
	Height uint
	Width  uint
}

// NodeListOptions holds parameters to list nodes with.
type NodeListOptions struct {
	Filters filters.Args
}

// NodeRemoveOptions holds parameters to remove nodes with.
type NodeRemoveOptions struct {
	Force bool
}

// ServiceCreateOptions contains the options to use when creating a service.
type ServiceCreateOptions struct {
	// EncodedRegistryAuth is the encoded registry authorization credentials to
	// use when updating the service.
	//
	// This field follows the format of the X-Registry-Auth header.
	EncodedRegistryAuth string

	// QueryRegistry indicates whether the service update requires
	// contacting a registry. A registry may be contacted to retrieve
	// the image digest and manifest, which in turn can be used to update
	// platform or other information about the service.
	QueryRegistry bool
}

// ServiceCreateResponse contains the information returned to a client
// on the creation of a new service.
type ServiceCreateResponse struct {
	// ID is the ID of the created service.
	ID string
	// Warnings is a set of non-fatal warning messages to pass on to the user.
	Warnings []string `json:",omitempty"`
}

// Values for RegistryAuthFrom in ServiceUpdateOptions.
const (
	// RegistryAuthFromSpec selects the credentials from the service's
	// current spec ("spec").
	RegistryAuthFromSpec = "spec"
	// RegistryAuthFromPreviousSpec selects the credentials from the
	// service's previous spec ("previous-spec").
	RegistryAuthFromPreviousSpec = "previous-spec"
)

// ServiceUpdateOptions contains the options to be used for updating services.
type ServiceUpdateOptions struct {
	// EncodedRegistryAuth is the encoded registry authorization credentials to
	// use when updating the service.
	//
	// This field follows the format of the X-Registry-Auth header.
	EncodedRegistryAuth string

	// TODO(stevvooe): Consider moving the version parameter of ServiceUpdate
	// into this field. While it does open API users up to racy writes, most
	// users may not need that level of consistency in practice.

	// RegistryAuthFrom specifies where to find the registry authorization
	// credentials if they are not given in EncodedRegistryAuth. Valid
	// values are "spec" and "previous-spec".
	RegistryAuthFrom string

	// Rollback indicates whether a server-side rollback should be
	// performed. When this is set, the provided spec will be ignored.
	// The valid values are "previous" and "none". An empty value is the
	// same as "none".
	Rollback string

	// QueryRegistry indicates whether the service update requires
	// contacting a registry. A registry may be contacted to retrieve
	// the image digest and manifest, which in turn can be used to update
	// platform or other information about the service.
	QueryRegistry bool
}

// ServiceListOptions holds parameters to list services with.
type ServiceListOptions struct {
	Filters filters.Args
}

// ServiceInspectOptions holds parameters related to the "service inspect"
// operation.
type ServiceInspectOptions struct {
	InsertDefaults bool
}

// TaskListOptions holds parameters to list tasks with.
type TaskListOptions struct {
	Filters filters.Args
}

// PluginRemoveOptions holds parameters to remove plugins.
type PluginRemoveOptions struct {
	Force bool
}

// PluginEnableOptions holds parameters to enable plugins.
type PluginEnableOptions struct {
	Timeout int
}

// PluginDisableOptions holds parameters to disable plugins.
type PluginDisableOptions struct {
	Force bool
}

// PluginInstallOptions holds parameters to install a plugin.
type PluginInstallOptions struct {
	Disabled             bool
	AcceptAllPermissions bool
	RegistryAuth         string // RegistryAuth is the base64 encoded credentials for the registry
	RemoteRef            string // RemoteRef is the plugin name on the registry
	PrivilegeFunc         RequestPrivilegeFunc
	AcceptPermissionsFunc func(PluginPrivileges) (bool, error)
	Args                  []string
}

// SwarmUnlockKeyResponse contains the response for Engine API:
// GET /swarm/unlockkey
type SwarmUnlockKeyResponse struct {
	// UnlockKey is the unlock key in ASCII-armored format.
	UnlockKey string
}

// PluginCreateOptions hold all options to plugin create.
type PluginCreateOptions struct {
	RepoName string
}
@ -0,0 +1,64 @@ |
||||
package types // import "github.com/docker/docker/api/types"
|
||||
|
||||
import ( |
||||
"github.com/docker/docker/api/types/container" |
||||
"github.com/docker/docker/api/types/network" |
||||
) |
||||
|
||||
// configs holds structs used for internal communication between the
|
||||
// frontend (such as an http server) and the backend (such as the
|
||||
// docker daemon).
|
||||
|
||||
// ContainerCreateConfig is the parameter set to ContainerCreate()
|
||||
type ContainerCreateConfig struct { |
||||
Name string |
||||
Config *container.Config |
||||
HostConfig *container.HostConfig |
||||
NetworkingConfig *network.NetworkingConfig |
||||
AdjustCPUShares bool |
||||
} |
||||
|
||||
// ContainerRmConfig holds arguments for the container remove
|
||||
// operation. This struct is used to tell the backend what operations
|
||||
// to perform.
|
||||
type ContainerRmConfig struct { |
||||
ForceRemove, RemoveVolume, RemoveLink bool |
||||
} |
||||
|
||||
// ExecConfig is a small subset of the Config struct that holds the configuration
|
||||
// for the exec feature of docker.
|
||||
type ExecConfig struct { |
||||
User string // User that will run the command
|
||||
Privileged bool // Is the container in privileged mode
|
||||
Tty bool // Attach standard streams to a tty.
|
||||
AttachStdin bool // Attach the standard input, makes possible user interaction
|
||||
AttachStderr bool // Attach the standard error
|
||||
AttachStdout bool // Attach the standard output
|
||||
Detach bool // Execute in detach mode
|
||||
DetachKeys string // Escape keys for detach
|
||||
Env []string // Environment variables
|
||||
WorkingDir string // Working directory
|
||||
Cmd []string // Execution commands and args
|
||||
} |
||||
|
||||
// PluginRmConfig holds arguments for plugin remove.
|
||||
type PluginRmConfig struct { |
||||
ForceRemove bool |
||||
} |
||||
|
||||
// PluginEnableConfig holds arguments for plugin enable
|
||||
type PluginEnableConfig struct { |
||||
Timeout int |
||||
} |
||||
|
||||
// PluginDisableConfig holds arguments for plugin disable.
|
||||
type PluginDisableConfig struct { |
||||
ForceDisable bool |
||||
} |
||||
|
||||
// NetworkListConfig stores the options available for listing networks
|
||||
type NetworkListConfig struct { |
||||
// TODO(@cpuguy83): naming is hard, this is pulled from what was being used in the router before moving here
|
||||
Detailed bool |
||||
Verbose bool |
||||
} |
@ -0,0 +1,69 @@ |
||||
package container // import "github.com/docker/docker/api/types/container"
|
||||
|
||||
import ( |
||||
"time" |
||||
|
||||
"github.com/docker/docker/api/types/strslice" |
||||
"github.com/docker/go-connections/nat" |
||||
) |
||||
|
||||
// MinimumDuration puts a minimum on user configured duration.
|
||||
// This is to prevent API error on time unit. For example, API may
|
||||
// set 3 as healthcheck interval with intention of 3 seconds, but
|
||||
// Docker interprets it as 3 nanoseconds.
|
||||
const MinimumDuration = 1 * time.Millisecond |
||||
|
||||
// HealthConfig holds configuration settings for the HEALTHCHECK feature.
|
||||
type HealthConfig struct { |
||||
// Test is the test to perform to check that the container is healthy.
|
||||
// An empty slice means to inherit the default.
|
||||
// The options are:
|
||||
// {} : inherit healthcheck
|
||||
// {"NONE"} : disable healthcheck
|
||||
// {"CMD", args...} : exec arguments directly
|
||||
// {"CMD-SHELL", command} : run command with system's default shell
|
||||
Test []string `json:",omitempty"` |
||||
|
||||
// Zero means to inherit. Durations are expressed as integer nanoseconds.
|
||||
Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
|
||||
Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
|
||||
StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down.
|
||||
|
||||
// Retries is the number of consecutive failures needed to consider a container as unhealthy.
|
||||
// Zero means inherit.
|
||||
Retries int `json:",omitempty"` |
||||
} |
||||
|
||||
// Config contains the configuration data about a container.
|
||||
// It should hold only portable information about the container.
|
||||
// Here, "portable" means "independent from the host we are running on".
|
||||
// Non-portable information *should* appear in HostConfig.
|
||||
// All fields added to this struct must be marked `omitempty` to keep getting
|
||||
// predictable hashes from the old `v1Compatibility` configuration.
|
||||
type Config struct { |
||||
Hostname string // Hostname
|
||||
Domainname string // Domainname
|
||||
User string // User that will run the command(s) inside the container, also support user:group
|
||||
AttachStdin bool // Attach the standard input, makes possible user interaction
|
||||
AttachStdout bool // Attach the standard output
|
||||
AttachStderr bool // Attach the standard error
|
||||
ExposedPorts nat.PortSet `json:",omitempty"` // List of exposed ports
|
||||
Tty bool // Attach standard streams to a tty, including stdin if it is not closed.
|
||||
OpenStdin bool // Open stdin
|
||||
StdinOnce bool // If true, close stdin after the 1 attached client disconnects.
|
||||
Env []string // List of environment variable to set in the container
|
||||
Cmd strslice.StrSlice // Command to run when starting the container
|
||||
Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
|
||||
ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (meaning treat as a command line) (Windows specific).
|
||||
Image string // Name of the image as it was passed by the operator (e.g. could be symbolic)
|
||||
Volumes map[string]struct{} // List of volumes (mounts) used for the container
|
||||
WorkingDir string // Current directory (PWD) in the command will be launched
|
||||
Entrypoint strslice.StrSlice // Entrypoint to run when starting the container
|
||||
NetworkDisabled bool `json:",omitempty"` // Is network disabled
|
||||
MacAddress string `json:",omitempty"` // Mac Address of the container
|
||||
OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile
|
||||
Labels map[string]string // List of labels set to this container
|
||||
StopSignal string `json:",omitempty"` // Signal to stop a container
|
||||
StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container
|
||||
Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
|
||||
} |
@ -0,0 +1,21 @@ |
||||
package container // import "github.com/docker/docker/api/types/container"
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// DO NOT EDIT THIS FILE
|
||||
// This file was generated by `swagger generate operation`
|
||||
//
|
||||
// See hack/generate-swagger-api.sh
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
// ContainerChangeResponseItem change item in response to ContainerChanges operation
|
||||
// swagger:model ContainerChangeResponseItem
|
||||
type ContainerChangeResponseItem struct { |
||||
|
||||
// Kind of change
|
||||
// Required: true
|
||||
Kind uint8 `json:"Kind"` |
||||
|
||||
// Path to file that has changed
|
||||
// Required: true
|
||||
Path string `json:"Path"` |
||||
} |
@ -0,0 +1,21 @@ |
||||
package container // import "github.com/docker/docker/api/types/container"
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// DO NOT EDIT THIS FILE
|
||||
// This file was generated by `swagger generate operation`
|
||||
//
|
||||
// See hack/generate-swagger-api.sh
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
// ContainerCreateCreatedBody OK response to ContainerCreate operation
|
||||
// swagger:model ContainerCreateCreatedBody
|
||||
type ContainerCreateCreatedBody struct { |
||||
|
||||
// The ID of the created container
|
||||
// Required: true
|
||||
ID string `json:"Id"` |
||||
|
||||
// Warnings encountered when creating the container
|
||||
// Required: true
|
||||
Warnings []string `json:"Warnings"` |
||||
} |
@ -0,0 +1,21 @@ |
||||
package container // import "github.com/docker/docker/api/types/container"
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// DO NOT EDIT THIS FILE
|
||||
// This file was generated by `swagger generate operation`
|
||||
//
|
||||
// See hack/generate-swagger-api.sh
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
// ContainerTopOKBody OK response to ContainerTop operation
|
||||
// swagger:model ContainerTopOKBody
|
||||
type ContainerTopOKBody struct { |
||||
|
||||
// Each process running in the container, where each is process is an array of values corresponding to the titles
|
||||
// Required: true
|
||||
Processes [][]string `json:"Processes"` |
||||
|
||||
// The ps column titles
|
||||
// Required: true
|
||||
Titles []string `json:"Titles"` |
||||
} |
@ -0,0 +1,17 @@ |
||||
package container // import "github.com/docker/docker/api/types/container"
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// DO NOT EDIT THIS FILE
|
||||
// This file was generated by `swagger generate operation`
|
||||
//
|
||||
// See hack/generate-swagger-api.sh
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
// ContainerUpdateOKBody OK response to ContainerUpdate operation
|
||||
// swagger:model ContainerUpdateOKBody
|
||||
type ContainerUpdateOKBody struct { |
||||
|
||||
// warnings
|
||||
// Required: true
|
||||
Warnings []string `json:"Warnings"` |
||||
} |
@ -0,0 +1,29 @@ |
||||
package container // import "github.com/docker/docker/api/types/container"
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// DO NOT EDIT THIS FILE
|
||||
// This file was generated by `swagger generate operation`
|
||||
//
|
||||
// See hack/generate-swagger-api.sh
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
// ContainerWaitOKBodyError container waiting error, if any
|
||||
// swagger:model ContainerWaitOKBodyError
|
||||
type ContainerWaitOKBodyError struct { |
||||
|
||||
// Details of an error
|
||||
Message string `json:"Message,omitempty"` |
||||
} |
||||
|
||||
// ContainerWaitOKBody OK response to ContainerWait operation
|
||||
// swagger:model ContainerWaitOKBody
|
||||
type ContainerWaitOKBody struct { |
||||
|
||||
// error
|
||||
// Required: true
|
||||
Error *ContainerWaitOKBodyError `json:"Error"` |
||||
|
||||
// Exit code of the container
|
||||
// Required: true
|
||||
StatusCode int64 `json:"StatusCode"` |
||||
} |
@ -0,0 +1,448 @@ |
||||
package container // import "github.com/docker/docker/api/types/container"
|
||||
|
||||
import ( |
||||
"strings" |
||||
|
||||
"github.com/docker/docker/api/types/blkiodev" |
||||
"github.com/docker/docker/api/types/mount" |
||||
"github.com/docker/docker/api/types/strslice" |
||||
"github.com/docker/go-connections/nat" |
||||
"github.com/docker/go-units" |
||||
) |
||||
|
||||
// CgroupnsMode represents the cgroup namespace mode of the container
|
||||
type CgroupnsMode string |
||||
|
||||
// IsPrivate indicates whether the container uses its own private cgroup namespace
|
||||
func (c CgroupnsMode) IsPrivate() bool { |
||||
return c == "private" |
||||
} |
||||
|
||||
// IsHost indicates whether the container shares the host's cgroup namespace
|
||||
func (c CgroupnsMode) IsHost() bool { |
||||
return c == "host" |
||||
} |
||||
|
||||
// IsEmpty indicates whether the container cgroup namespace mode is unset
|
||||
func (c CgroupnsMode) IsEmpty() bool { |
||||
return c == "" |
||||
} |
||||
|
||||
// Valid indicates whether the cgroup namespace mode is valid
|
||||
func (c CgroupnsMode) Valid() bool { |
||||
return c.IsEmpty() || c.IsPrivate() || c.IsHost() |
||||
} |
||||
|
||||
// Isolation represents the isolation technology of a container. The supported
|
||||
// values are platform specific
|
||||
type Isolation string |
||||
|
||||
// IsDefault indicates the default isolation technology of a container. On Linux this
|
||||
// is the native driver. On Windows, this is a Windows Server Container.
|
||||
func (i Isolation) IsDefault() bool { |
||||
return strings.ToLower(string(i)) == "default" || string(i) == "" |
||||
} |
||||
|
||||
// IsHyperV indicates the use of a Hyper-V partition for isolation
|
||||
func (i Isolation) IsHyperV() bool { |
||||
return strings.ToLower(string(i)) == "hyperv" |
||||
} |
||||
|
||||
// IsProcess indicates the use of process isolation
|
||||
func (i Isolation) IsProcess() bool { |
||||
return strings.ToLower(string(i)) == "process" |
||||
} |
||||
|
||||
const ( |
||||
// IsolationEmpty is unspecified (same behavior as default)
|
||||
IsolationEmpty = Isolation("") |
||||
// IsolationDefault is the default isolation mode on current daemon
|
||||
IsolationDefault = Isolation("default") |
||||
// IsolationProcess is process isolation mode
|
||||
IsolationProcess = Isolation("process") |
||||
// IsolationHyperV is HyperV isolation mode
|
||||
IsolationHyperV = Isolation("hyperv") |
||||
) |
||||
|
||||
// IpcMode represents the container ipc stack.
|
||||
type IpcMode string |
||||
|
||||
// IsPrivate indicates whether the container uses its own private ipc namespace which can not be shared.
|
||||
func (n IpcMode) IsPrivate() bool { |
||||
return n == "private" |
||||
} |
||||
|
||||
// IsHost indicates whether the container shares the host's ipc namespace.
|
||||
func (n IpcMode) IsHost() bool { |
||||
return n == "host" |
||||
} |
||||
|
||||
// IsShareable indicates whether the container's ipc namespace can be shared with another container.
|
||||
func (n IpcMode) IsShareable() bool { |
||||
return n == "shareable" |
||||
} |
||||
|
||||
// IsContainer indicates whether the container uses another container's ipc namespace.
|
||||
func (n IpcMode) IsContainer() bool { |
||||
parts := strings.SplitN(string(n), ":", 2) |
||||
return len(parts) > 1 && parts[0] == "container" |
||||
} |
||||
|
||||
// IsNone indicates whether container IpcMode is set to "none".
|
||||
func (n IpcMode) IsNone() bool { |
||||
return n == "none" |
||||
} |
||||
|
||||
// IsEmpty indicates whether container IpcMode is empty
|
||||
func (n IpcMode) IsEmpty() bool { |
||||
return n == "" |
||||
} |
||||
|
||||
// Valid indicates whether the ipc mode is valid.
|
||||
func (n IpcMode) Valid() bool { |
||||
return n.IsEmpty() || n.IsNone() || n.IsPrivate() || n.IsHost() || n.IsShareable() || n.IsContainer() |
||||
} |
||||
|
||||
// Container returns the name of the container ipc stack is going to be used.
|
||||
func (n IpcMode) Container() string { |
||||
parts := strings.SplitN(string(n), ":", 2) |
||||
if len(parts) > 1 && parts[0] == "container" { |
||||
return parts[1] |
||||
} |
||||
return "" |
||||
} |
||||
|
||||
// NetworkMode represents the container network stack.
|
||||
type NetworkMode string |
||||
|
||||
// IsNone indicates whether container isn't using a network stack.
|
||||
func (n NetworkMode) IsNone() bool { |
||||
return n == "none" |
||||
} |
||||
|
||||
// IsDefault indicates whether container uses the default network stack.
|
||||
func (n NetworkMode) IsDefault() bool { |
||||
return n == "default" |
||||
} |
||||
|
||||
// IsPrivate indicates whether container uses its private network stack.
|
||||
func (n NetworkMode) IsPrivate() bool { |
||||
return !(n.IsHost() || n.IsContainer()) |
||||
} |
||||
|
||||
// IsContainer indicates whether container uses a container network stack.
|
||||
func (n NetworkMode) IsContainer() bool { |
||||
parts := strings.SplitN(string(n), ":", 2) |
||||
return len(parts) > 1 && parts[0] == "container" |
||||
} |
||||
|
||||
// ConnectedContainer is the id of the container which network this container is connected to.
|
||||
func (n NetworkMode) ConnectedContainer() string { |
||||
parts := strings.SplitN(string(n), ":", 2) |
||||
if len(parts) > 1 { |
||||
return parts[1] |
||||
} |
||||
return "" |
||||
} |
||||
|
||||
//UserDefined indicates user-created network
|
||||
func (n NetworkMode) UserDefined() string { |
||||
if n.IsUserDefined() { |
||||
return string(n) |
||||
} |
||||
return "" |
||||
} |
||||
|
||||
// UsernsMode represents userns mode in the container.
|
||||
type UsernsMode string |
||||
|
||||
// IsHost indicates whether the container uses the host's userns.
|
||||
func (n UsernsMode) IsHost() bool { |
||||
return n == "host" |
||||
} |
||||
|
||||
// IsPrivate indicates whether the container uses the a private userns.
|
||||
func (n UsernsMode) IsPrivate() bool { |
||||
return !(n.IsHost()) |
||||
} |
||||
|
||||
// Valid indicates whether the userns is valid.
|
||||
func (n UsernsMode) Valid() bool { |
||||
parts := strings.Split(string(n), ":") |
||||
switch mode := parts[0]; mode { |
||||
case "", "host": |
||||
default: |
||||
return false |
||||
} |
||||
return true |
||||
} |
||||
|
||||
// CgroupSpec represents the cgroup to use for the container.
|
||||
type CgroupSpec string |
||||
|
||||
// IsContainer indicates whether the container is using another container cgroup
|
||||
func (c CgroupSpec) IsContainer() bool { |
||||
parts := strings.SplitN(string(c), ":", 2) |
||||
return len(parts) > 1 && parts[0] == "container" |
||||
} |
||||
|
||||
// Valid indicates whether the cgroup spec is valid.
|
||||
func (c CgroupSpec) Valid() bool { |
||||
return c.IsContainer() || c == "" |
||||
} |
||||
|
||||
// Container returns the name of the container whose cgroup will be used.
|
||||
func (c CgroupSpec) Container() string { |
||||
parts := strings.SplitN(string(c), ":", 2) |
||||
if len(parts) > 1 { |
||||
return parts[1] |
||||
} |
||||
return "" |
||||
} |
||||
|
||||
// UTSMode represents the UTS namespace of the container.
|
||||
type UTSMode string |
||||
|
||||
// IsPrivate indicates whether the container uses its private UTS namespace.
|
||||
func (n UTSMode) IsPrivate() bool { |
||||
return !(n.IsHost()) |
||||
} |
||||
|
||||
// IsHost indicates whether the container uses the host's UTS namespace.
|
||||
func (n UTSMode) IsHost() bool { |
||||
return n == "host" |
||||
} |
||||
|
||||
// Valid indicates whether the UTS namespace is valid.
|
||||
func (n UTSMode) Valid() bool { |
||||
parts := strings.Split(string(n), ":") |
||||
switch mode := parts[0]; mode { |
||||
case "", "host": |
||||
default: |
||||
return false |
||||
} |
||||
return true |
||||
} |
||||
|
||||
// PidMode represents the pid namespace of the container.
|
||||
type PidMode string |
||||
|
||||
// IsPrivate indicates whether the container uses its own new pid namespace.
|
||||
func (n PidMode) IsPrivate() bool { |
||||
return !(n.IsHost() || n.IsContainer()) |
||||
} |
||||
|
||||
// IsHost indicates whether the container uses the host's pid namespace.
|
||||
func (n PidMode) IsHost() bool { |
||||
return n == "host" |
||||
} |
||||
|
||||
// IsContainer indicates whether the container uses a container's pid namespace.
|
||||
func (n PidMode) IsContainer() bool { |
||||
parts := strings.SplitN(string(n), ":", 2) |
||||
return len(parts) > 1 && parts[0] == "container" |
||||
} |
||||
|
||||
// Valid indicates whether the pid namespace is valid.
|
||||
func (n PidMode) Valid() bool { |
||||
parts := strings.Split(string(n), ":") |
||||
switch mode := parts[0]; mode { |
||||
case "", "host": |
||||
case "container": |
||||
if len(parts) != 2 || parts[1] == "" { |
||||
return false |
||||
} |
||||
default: |
||||
return false |
||||
} |
||||
return true |
||||
} |
||||
|
||||
// Container returns the name of the container whose pid namespace is going to be used.
|
||||
func (n PidMode) Container() string { |
||||
parts := strings.SplitN(string(n), ":", 2) |
||||
if len(parts) > 1 { |
||||
return parts[1] |
||||
} |
||||
return "" |
||||
} |
||||
|
||||
// DeviceRequest represents a request for devices from a device driver.
|
||||
// Used by GPU device drivers.
|
||||
type DeviceRequest struct { |
||||
Driver string // Name of device driver
|
||||
Count int // Number of devices to request (-1 = All)
|
||||
DeviceIDs []string // List of device IDs as recognizable by the device driver
|
||||
Capabilities [][]string // An OR list of AND lists of device capabilities (e.g. "gpu")
|
||||
Options map[string]string // Options to pass onto the device driver
|
||||
} |
||||
|
||||
// DeviceMapping represents the device mapping between the host and the container.
|
||||
type DeviceMapping struct { |
||||
PathOnHost string |
||||
PathInContainer string |
||||
CgroupPermissions string |
||||
} |
||||
|
||||
// RestartPolicy represents the restart policies of the container.
|
||||
type RestartPolicy struct { |
||||
Name string |
||||
MaximumRetryCount int |
||||
} |
||||
|
||||
// IsNone indicates whether the container has the "no" restart policy.
|
||||
// This means the container will not automatically restart when exiting.
|
||||
func (rp *RestartPolicy) IsNone() bool { |
||||
return rp.Name == "no" || rp.Name == "" |
||||
} |
||||
|
||||
// IsAlways indicates whether the container has the "always" restart policy.
|
||||
// This means the container will automatically restart regardless of the exit status.
|
||||
func (rp *RestartPolicy) IsAlways() bool { |
||||
return rp.Name == "always" |
||||
} |
||||
|
||||
// IsOnFailure indicates whether the container has the "on-failure" restart policy.
|
||||
// This means the container will automatically restart of exiting with a non-zero exit status.
|
||||
func (rp *RestartPolicy) IsOnFailure() bool { |
||||
return rp.Name == "on-failure" |
||||
} |
||||
|
||||
// IsUnlessStopped indicates whether the container has the
|
||||
// "unless-stopped" restart policy. This means the container will
|
||||
// automatically restart unless user has put it to stopped state.
|
||||
func (rp *RestartPolicy) IsUnlessStopped() bool { |
||||
return rp.Name == "unless-stopped" |
||||
} |
||||
|
||||
// IsSame compares two RestartPolicy to see if they are the same
|
||||
func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool { |
||||
return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount |
||||
} |
||||
|
||||
// LogMode is a type to define the available modes for logging
|
||||
// These modes affect how logs are handled when log messages start piling up.
|
||||
type LogMode string |
||||
|
||||
// Available logging modes
|
||||
const ( |
||||
LogModeUnset = "" |
||||
LogModeBlocking LogMode = "blocking" |
||||
LogModeNonBlock LogMode = "non-blocking" |
||||
) |
||||
|
||||
// LogConfig represents the logging configuration of the container.
|
||||
type LogConfig struct { |
||||
Type string |
||||
Config map[string]string |
||||
} |
||||
|
||||
// Resources contains container's resources (cgroups config, ulimits...)
|
||||
type Resources struct { |
||||
// Applicable to all platforms
|
||||
CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers)
|
||||
Memory int64 // Memory limit (in bytes)
|
||||
NanoCPUs int64 `json:"NanoCpus"` // CPU quota in units of 10<sup>-9</sup> CPUs.
|
||||
|
||||
// Applicable to UNIX platforms
|
||||
CgroupParent string // Parent cgroup.
|
||||
BlkioWeight uint16 // Block IO weight (relative weight vs. other containers)
|
||||
BlkioWeightDevice []*blkiodev.WeightDevice |
||||
BlkioDeviceReadBps []*blkiodev.ThrottleDevice |
||||
BlkioDeviceWriteBps []*blkiodev.ThrottleDevice |
||||
BlkioDeviceReadIOps []*blkiodev.ThrottleDevice |
||||
BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice |
||||
CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period
|
||||
CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota
|
||||
CPURealtimePeriod int64 `json:"CpuRealtimePeriod"` // CPU real-time period
|
||||
CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime"` // CPU real-time runtime
|
||||
CpusetCpus string // CpusetCpus 0-2, 0,1
|
||||
CpusetMems string // CpusetMems 0-2, 0,1
|
||||
Devices []DeviceMapping // List of devices to map inside the container
|
||||
DeviceCgroupRules []string // List of rule to be added to the device cgroup
|
||||
DeviceRequests []DeviceRequest // List of device requests for device drivers
|
||||
KernelMemory int64 // Kernel memory limit (in bytes)
|
||||
KernelMemoryTCP int64 // Hard limit for kernel TCP buffer memory (in bytes)
|
||||
MemoryReservation int64 // Memory soft limit (in bytes)
|
||||
MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap
|
||||
MemorySwappiness *int64 // Tuning container memory swappiness behaviour
|
||||
OomKillDisable *bool // Whether to disable OOM Killer or not
|
||||
PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change.
|
||||
Ulimits []*units.Ulimit // List of ulimits to be set in the container
|
||||
|
||||
// Applicable to Windows
|
||||
CPUCount int64 `json:"CpuCount"` // CPU count
|
||||
CPUPercent int64 `json:"CpuPercent"` // CPU percent
|
||||
IOMaximumIOps uint64 // Maximum IOps for the container system drive
|
||||
IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive
|
||||
} |
||||
|
||||
// UpdateConfig holds the mutable attributes of a Container.
|
||||
// Those attributes can be updated at runtime.
|
||||
type UpdateConfig struct { |
||||
// Contains container's resources (cgroups, ulimits)
|
||||
Resources |
||||
RestartPolicy RestartPolicy |
||||
} |
||||
|
||||
// HostConfig the non-portable Config structure of a container.
|
||||
// Here, "non-portable" means "dependent of the host we are running on".
|
||||
// Portable information *should* appear in Config.
|
||||
type HostConfig struct { |
||||
// Applicable to all platforms
|
||||
Binds []string // List of volume bindings for this container
|
||||
ContainerIDFile string // File (path) where the containerId is written
|
||||
LogConfig LogConfig // Configuration of the logs for this container
|
||||
NetworkMode NetworkMode // Network mode to use for the container
|
||||
PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host
|
||||
RestartPolicy RestartPolicy // Restart policy to be used for the container
|
||||
AutoRemove bool // Automatically remove container when it exits
|
||||
VolumeDriver string // Name of the volume driver used to mount volumes
|
||||
VolumesFrom []string // List of volumes to take from other container
|
||||
|
||||
// Applicable to UNIX platforms
|
||||
CapAdd strslice.StrSlice // List of kernel capabilities to add to the container
|
||||
CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container
|
||||
Capabilities []string `json:"Capabilities"` // List of kernel capabilities to be available for container (this overrides the default set)
|
||||
CgroupnsMode CgroupnsMode // Cgroup namespace mode to use for the container
|
||||
DNS []string `json:"Dns"` // List of DNS server to lookup
|
||||
DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for
|
||||
DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for
|
||||
ExtraHosts []string // List of extra hosts
|
||||
GroupAdd []string // List of additional groups that the container process will run as
|
||||
IpcMode IpcMode // IPC namespace to use for the container
|
||||
Cgroup CgroupSpec // Cgroup to use for the container
|
||||
Links []string // List of links (in the name:alias form)
|
||||
OomScoreAdj int // Container preference for OOM-killing
|
||||
PidMode PidMode // PID namespace to use for the container
|
||||
Privileged bool // Is the container in privileged mode
|
||||
PublishAllPorts bool // Should docker publish all exposed port for the container
|
||||
ReadonlyRootfs bool // Is the container root filesystem in read-only
|
||||
SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux.
|
||||
StorageOpt map[string]string `json:",omitempty"` // Storage driver options per container.
|
||||
Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container
|
||||
UTSMode UTSMode // UTS namespace to use for the container
|
||||
UsernsMode UsernsMode // The user namespace to use for the container
|
||||
ShmSize int64 // Total shm memory usage
|
||||
Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container
|
||||
Runtime string `json:",omitempty"` // Runtime to use with this container
|
||||
|
||||
// Applicable to Windows
|
||||
ConsoleSize [2]uint // Initial console size (height,width)
|
||||
Isolation Isolation // Isolation technology of the container (e.g. default, hyperv)
|
||||
|
||||
// Contains container's resources (cgroups, ulimits)
|
||||
Resources |
||||
|
||||
// Mounts specs used by the container
|
||||
Mounts []mount.Mount `json:",omitempty"` |
||||
|
||||
// MaskedPaths is the list of paths to be masked inside the container (this overrides the default set of paths)
|
||||
MaskedPaths []string |
||||
|
||||
// ReadonlyPaths is the list of paths to be set as read-only inside the container (this overrides the default set of paths)
|
||||
ReadonlyPaths []string |
||||
|
||||
// Run a custom init inside the container, if null, use the daemon's configured settings
|
||||
Init *bool `json:",omitempty"` |
||||
} |
@ -0,0 +1,41 @@ |
||||
// +build !windows
|
||||
|
||||
package container // import "github.com/docker/docker/api/types/container"
|
||||
|
||||
// IsValid indicates if an isolation technology is valid
|
||||
func (i Isolation) IsValid() bool { |
||||
return i.IsDefault() |
||||
} |
||||
|
||||
// NetworkName returns the name of the network stack.
|
||||
func (n NetworkMode) NetworkName() string { |
||||
if n.IsBridge() { |
||||
return "bridge" |
||||
} else if n.IsHost() { |
||||
return "host" |
||||
} else if n.IsContainer() { |
||||
return "container" |
||||
} else if n.IsNone() { |
||||
return "none" |
||||
} else if n.IsDefault() { |
||||
return "default" |
||||
} else if n.IsUserDefined() { |
||||
return n.UserDefined() |
||||
} |
||||
return "" |
||||
} |
||||
|
||||
// IsBridge indicates whether container uses the bridge network stack
|
||||
func (n NetworkMode) IsBridge() bool { |
||||
return n == "bridge" |
||||
} |
||||
|
||||
// IsHost indicates whether container uses the host network stack.
|
||||
func (n NetworkMode) IsHost() bool { |
||||
return n == "host" |
||||
} |
||||
|
||||
// IsUserDefined indicates user-created network
|
||||
func (n NetworkMode) IsUserDefined() bool { |
||||
return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer() |
||||
} |
@ -0,0 +1,40 @@ |
||||
package container // import "github.com/docker/docker/api/types/container"
|
||||
|
||||
// IsBridge indicates whether container uses the bridge network stack
|
||||
// in windows it is given the name NAT
|
||||
func (n NetworkMode) IsBridge() bool { |
||||
return n == "nat" |
||||
} |
||||
|
||||
// IsHost indicates whether container uses the host network stack.
|
||||
// returns false as this is not supported by windows
|
||||
func (n NetworkMode) IsHost() bool { |
||||
return false |
||||
} |
||||
|
||||
// IsUserDefined indicates user-created network
|
||||
func (n NetworkMode) IsUserDefined() bool { |
||||
return !n.IsDefault() && !n.IsNone() && !n.IsBridge() && !n.IsContainer() |
||||
} |
||||
|
||||
// IsValid indicates if an isolation technology is valid
|
||||
func (i Isolation) IsValid() bool { |
||||
return i.IsDefault() || i.IsHyperV() || i.IsProcess() |
||||
} |
||||
|
||||
// NetworkName returns the name of the network stack.
|
||||
func (n NetworkMode) NetworkName() string { |
||||
if n.IsDefault() { |
||||
return "default" |
||||
} else if n.IsBridge() { |
||||
return "nat" |
||||
} else if n.IsNone() { |
||||
return "none" |
||||
} else if n.IsContainer() { |
||||
return "container" |
||||
} else if n.IsUserDefined() { |
||||
return n.UserDefined() |
||||
} |
||||
|
||||
return "" |
||||
} |
@ -0,0 +1,22 @@ |
||||
package container // import "github.com/docker/docker/api/types/container"
|
||||
|
||||
// WaitCondition is a type used to specify a container state for which
// to wait.
type WaitCondition string

// Possible WaitCondition Values.
//
// WaitConditionNotRunning (default) is used to wait for any of the non-running
// states: "created", "exited", "dead", "removing", or "removed".
//
// WaitConditionNextExit is used to wait for the next time the state changes
// to a non-running state. If the state is currently "created" or "exited",
// this would cause Wait() to block until either the container runs and exits
// or is removed.
//
// WaitConditionRemoved is used to wait for the container to be removed.
const (
	WaitConditionNotRunning WaitCondition = "not-running"
	WaitConditionNextExit   WaitCondition = "next-exit"
	WaitConditionRemoved    WaitCondition = "removed"
)
@ -0,0 +1,13 @@ |
||||
package types |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
// ErrorResponse Represents an error.
// swagger:model ErrorResponse
type ErrorResponse struct {

	// The error message.
	// Required: true
	Message string `json:"message"`
}
@ -0,0 +1,6 @@ |
||||
package types |
||||
|
||||
// Error returns the error message
|
||||
func (e ErrorResponse) Error() string { |
||||
return e.Message |
||||
} |
@ -0,0 +1,52 @@ |
||||
package events // import "github.com/docker/docker/api/types/events"
|
||||
|
||||
const (
	// ContainerEventType is the event type that containers generate.
	ContainerEventType = "container"
	// DaemonEventType is the event type that the daemon generates.
	DaemonEventType = "daemon"
	// ImageEventType is the event type that images generate.
	ImageEventType = "image"
	// NetworkEventType is the event type that networks generate.
	NetworkEventType = "network"
	// PluginEventType is the event type that plugins generate.
	PluginEventType = "plugin"
	// VolumeEventType is the event type that volumes generate.
	VolumeEventType = "volume"
	// ServiceEventType is the event type that services generate.
	ServiceEventType = "service"
	// NodeEventType is the event type that nodes generate.
	NodeEventType = "node"
	// SecretEventType is the event type that secrets generate.
	SecretEventType = "secret"
	// ConfigEventType is the event type that configs generate.
	ConfigEventType = "config"
)
||||
|
||||
// Actor describes something that generates events,
// like a container, or a network, or a volume.
// It has a defined name and a set or attributes.
// The container attributes are its labels, other actors
// can generate these attributes from other properties.
type Actor struct {
	// ID uniquely identifies the actor (e.g. a container or network ID).
	ID string
	// Attributes carries actor metadata; for containers these are its labels.
	Attributes map[string]string
}
||||
|
||||
// Message represents the information an event contains.
type Message struct {
	// Deprecated information from JSONMessage.
	// With data only in container events.
	Status string `json:"status,omitempty"`
	ID     string `json:"id,omitempty"`
	From   string `json:"from,omitempty"`

	// Type is one of the *EventType constants declared in this package.
	Type   string
	Action string
	Actor  Actor
	// Engine events are local scope. Cluster events are swarm scope.
	Scope string `json:"scope,omitempty"`

	Time     int64 `json:"time,omitempty"`
	TimeNano int64 `json:"timeNano,omitempty"`
}
@ -0,0 +1,315 @@ |
||||
/*Package filters provides tools for encoding a mapping of keys to a set of |
||||
multiple values. |
||||
*/ |
||||
package filters // import "github.com/docker/docker/api/types/filters"
|
||||
|
||||
import ( |
||||
"encoding/json" |
||||
"regexp" |
||||
"strings" |
||||
|
||||
"github.com/docker/docker/api/types/versions" |
||||
) |
||||
|
||||
// Args stores a mapping of keys to a set of multiple values.
type Args struct {
	// fields maps a filter key to the set of its values. The inner map
	// is used as a set: a value is a member when it maps to true.
	fields map[string]map[string]bool
}
||||
|
||||
// KeyValuePair are used to initialize a new Args.
type KeyValuePair struct {
	Key   string
	Value string
}
||||
|
||||
// Arg creates a new KeyValuePair for initializing Args
|
||||
func Arg(key, value string) KeyValuePair { |
||||
return KeyValuePair{Key: key, Value: value} |
||||
} |
||||
|
||||
// NewArgs returns a new Args populated with the initial args
|
||||
func NewArgs(initialArgs ...KeyValuePair) Args { |
||||
args := Args{fields: map[string]map[string]bool{}} |
||||
for _, arg := range initialArgs { |
||||
args.Add(arg.Key, arg.Value) |
||||
} |
||||
return args |
||||
} |
||||
|
||||
// MarshalJSON returns a JSON byte representation of the Args
|
||||
func (args Args) MarshalJSON() ([]byte, error) { |
||||
if len(args.fields) == 0 { |
||||
return []byte{}, nil |
||||
} |
||||
return json.Marshal(args.fields) |
||||
} |
||||
|
||||
// ToJSON returns the Args as a JSON encoded string
|
||||
func ToJSON(a Args) (string, error) { |
||||
if a.Len() == 0 { |
||||
return "", nil |
||||
} |
||||
buf, err := json.Marshal(a) |
||||
return string(buf), err |
||||
} |
||||
|
||||
// ToParamWithVersion encodes Args as a JSON string. If version is less than 1.22
|
||||
// then the encoded format will use an older legacy format where the values are a
|
||||
// list of strings, instead of a set.
|
||||
//
|
||||
// Deprecated: Use ToJSON
|
||||
func ToParamWithVersion(version string, a Args) (string, error) { |
||||
if a.Len() == 0 { |
||||
return "", nil |
||||
} |
||||
|
||||
if version != "" && versions.LessThan(version, "1.22") { |
||||
buf, err := json.Marshal(convertArgsToSlice(a.fields)) |
||||
return string(buf), err |
||||
} |
||||
|
||||
return ToJSON(a) |
||||
} |
||||
|
||||
// FromJSON decodes a JSON encoded string into Args
|
||||
func FromJSON(p string) (Args, error) { |
||||
args := NewArgs() |
||||
|
||||
if p == "" { |
||||
return args, nil |
||||
} |
||||
|
||||
raw := []byte(p) |
||||
err := json.Unmarshal(raw, &args) |
||||
if err == nil { |
||||
return args, nil |
||||
} |
||||
|
||||
// Fallback to parsing arguments in the legacy slice format
|
||||
deprecated := map[string][]string{} |
||||
if legacyErr := json.Unmarshal(raw, &deprecated); legacyErr != nil { |
||||
return args, err |
||||
} |
||||
|
||||
args.fields = deprecatedArgs(deprecated) |
||||
return args, nil |
||||
} |
||||
|
||||
// UnmarshalJSON populates the Args from JSON encode bytes.
//
// Note: the value receiver works here because json.Unmarshal decodes
// into args.fields, and the map header is shared between the receiver
// copy and the caller's Args — assuming the fields map was already
// initialized (as NewArgs does). TODO(review): confirm no caller
// unmarshals into a zero-value Args.
func (args Args) UnmarshalJSON(raw []byte) error {
	// Treat empty input as a no-op rather than a JSON syntax error.
	if len(raw) == 0 {
		return nil
	}
	return json.Unmarshal(raw, &args.fields)
}
||||
|
||||
// Get returns the list of values associated with the key
|
||||
func (args Args) Get(key string) []string { |
||||
values := args.fields[key] |
||||
if values == nil { |
||||
return make([]string, 0) |
||||
} |
||||
slice := make([]string, 0, len(values)) |
||||
for key := range values { |
||||
slice = append(slice, key) |
||||
} |
||||
return slice |
||||
} |
||||
|
||||
// Add a new value to the set of values
|
||||
func (args Args) Add(key, value string) { |
||||
if _, ok := args.fields[key]; ok { |
||||
args.fields[key][value] = true |
||||
} else { |
||||
args.fields[key] = map[string]bool{value: true} |
||||
} |
||||
} |
||||
|
||||
// Del removes a value from the set
|
||||
func (args Args) Del(key, value string) { |
||||
if _, ok := args.fields[key]; ok { |
||||
delete(args.fields[key], value) |
||||
if len(args.fields[key]) == 0 { |
||||
delete(args.fields, key) |
||||
} |
||||
} |
||||
} |
||||
|
||||
// Len returns the number of keys in the mapping
|
||||
func (args Args) Len() int { |
||||
return len(args.fields) |
||||
} |
||||
|
||||
// MatchKVList returns true if all the pairs in sources exist as key=value
|
||||
// pairs in the mapping at key, or if there are no values at key.
|
||||
func (args Args) MatchKVList(key string, sources map[string]string) bool { |
||||
fieldValues := args.fields[key] |
||||
|
||||
//do not filter if there is no filter set or cannot determine filter
|
||||
if len(fieldValues) == 0 { |
||||
return true |
||||
} |
||||
|
||||
if len(sources) == 0 { |
||||
return false |
||||
} |
||||
|
||||
for value := range fieldValues { |
||||
testKV := strings.SplitN(value, "=", 2) |
||||
|
||||
v, ok := sources[testKV[0]] |
||||
if !ok { |
||||
return false |
||||
} |
||||
if len(testKV) == 2 && testKV[1] != v { |
||||
return false |
||||
} |
||||
} |
||||
|
||||
return true |
||||
} |
||||
|
||||
// Match returns true if any of the values at key match the source string
|
||||
func (args Args) Match(field, source string) bool { |
||||
if args.ExactMatch(field, source) { |
||||
return true |
||||
} |
||||
|
||||
fieldValues := args.fields[field] |
||||
for name2match := range fieldValues { |
||||
match, err := regexp.MatchString(name2match, source) |
||||
if err != nil { |
||||
continue |
||||
} |
||||
if match { |
||||
return true |
||||
} |
||||
} |
||||
return false |
||||
} |
||||
|
||||
// ExactMatch returns true if the source matches exactly one of the values.
|
||||
func (args Args) ExactMatch(key, source string) bool { |
||||
fieldValues, ok := args.fields[key] |
||||
//do not filter if there is no filter set or cannot determine filter
|
||||
if !ok || len(fieldValues) == 0 { |
||||
return true |
||||
} |
||||
|
||||
// try to match full name value to avoid O(N) regular expression matching
|
||||
return fieldValues[source] |
||||
} |
||||
|
||||
// UniqueExactMatch returns true if there is only one value and the source
|
||||
// matches exactly the value.
|
||||
func (args Args) UniqueExactMatch(key, source string) bool { |
||||
fieldValues := args.fields[key] |
||||
//do not filter if there is no filter set or cannot determine filter
|
||||
if len(fieldValues) == 0 { |
||||
return true |
||||
} |
||||
if len(args.fields[key]) != 1 { |
||||
return false |
||||
} |
||||
|
||||
// try to match full name value to avoid O(N) regular expression matching
|
||||
return fieldValues[source] |
||||
} |
||||
|
||||
// FuzzyMatch returns true if the source matches exactly one value, or the
|
||||
// source has one of the values as a prefix.
|
||||
func (args Args) FuzzyMatch(key, source string) bool { |
||||
if args.ExactMatch(key, source) { |
||||
return true |
||||
} |
||||
|
||||
fieldValues := args.fields[key] |
||||
for prefix := range fieldValues { |
||||
if strings.HasPrefix(source, prefix) { |
||||
return true |
||||
} |
||||
} |
||||
return false |
||||
} |
||||
|
||||
// Contains returns true if the key exists in the mapping
|
||||
func (args Args) Contains(field string) bool { |
||||
_, ok := args.fields[field] |
||||
return ok |
||||
} |
||||
|
||||
// invalidFilter is the error returned by Validate for a filter key that
// is not in the accepted set.
type invalidFilter string

// Error returns the message identifying the rejected filter key.
func (e invalidFilter) Error() string {
	return "Invalid filter '" + string(e) + "'"
}

// InvalidParameter marks this error as an invalid-parameter error so it
// can be classified by error-handling code (e.g. the errdefs package).
func (invalidFilter) InvalidParameter() {}
||||
|
||||
// Validate compared the set of accepted keys against the keys in the mapping.
|
||||
// An error is returned if any mapping keys are not in the accepted set.
|
||||
func (args Args) Validate(accepted map[string]bool) error { |
||||
for name := range args.fields { |
||||
if !accepted[name] { |
||||
return invalidFilter(name) |
||||
} |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// WalkValues iterates over the list of values for a key in the mapping and calls
|
||||
// op() for each value. If op returns an error the iteration stops and the
|
||||
// error is returned.
|
||||
func (args Args) WalkValues(field string, op func(value string) error) error { |
||||
if _, ok := args.fields[field]; !ok { |
||||
return nil |
||||
} |
||||
for v := range args.fields[field] { |
||||
if err := op(v); err != nil { |
||||
return err |
||||
} |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// Clone returns a copy of args.
|
||||
func (args Args) Clone() (newArgs Args) { |
||||
newArgs.fields = make(map[string]map[string]bool, len(args.fields)) |
||||
for k, m := range args.fields { |
||||
var mm map[string]bool |
||||
if m != nil { |
||||
mm = make(map[string]bool, len(m)) |
||||
for kk, v := range m { |
||||
mm[kk] = v |
||||
} |
||||
} |
||||
newArgs.fields[k] = mm |
||||
} |
||||
return newArgs |
||||
} |
||||
|
||||
// deprecatedArgs converts the legacy key -> []string representation
// into the key -> set-of-values form used internally.
func deprecatedArgs(d map[string][]string) map[string]map[string]bool {
	converted := map[string]map[string]bool{}
	for key, list := range d {
		set := map[string]bool{}
		for _, item := range list {
			set[item] = true
		}
		converted[key] = set
	}
	return converted
}
||||
|
||||
// convertArgsToSlice converts the internal key -> set-of-values form
// into the legacy key -> []string representation, keeping only the set
// members that map to true.
func convertArgsToSlice(f map[string]map[string]bool) map[string][]string {
	converted := map[string][]string{}
	for key, set := range f {
		values := []string{}
		for value, enabled := range set {
			if enabled {
				values = append(values, value)
			}
		}
		converted[key] = values
	}
	return converted
}
@ -0,0 +1,17 @@ |
||||
package types |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
// GraphDriverData Information about a container's graph driver.
// swagger:model GraphDriverData
type GraphDriverData struct {

	// data
	// Required: true
	Data map[string]string `json:"Data"`

	// name
	// Required: true
	Name string `json:"Name"`
}
@ -0,0 +1,13 @@ |
||||
package types |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
// IDResponse Response to an API call that returns just an Id.
// swagger:model IdResponse
type IDResponse struct {

	// The id of the newly created object.
	// Required: true
	ID string `json:"Id"`
}
@ -0,0 +1,37 @@ |
||||
package image // import "github.com/docker/docker/api/types/image"
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// DO NOT EDIT THIS FILE
|
||||
// This file was generated by `swagger generate operation`
|
||||
//
|
||||
// See hack/generate-swagger-api.sh
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
// HistoryResponseItem individual image layer information in response to ImageHistory operation.
// swagger:model HistoryResponseItem
type HistoryResponseItem struct {

	// comment
	// Required: true
	Comment string `json:"Comment"`

	// created
	// Required: true
	Created int64 `json:"Created"`

	// created by
	// Required: true
	CreatedBy string `json:"CreatedBy"`

	// Id
	// Required: true
	ID string `json:"Id"`

	// size
	// Required: true
	Size int64 `json:"Size"`

	// tags
	// Required: true
	Tags []string `json:"Tags"`
}
@ -0,0 +1,15 @@ |
||||
package types |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
// ImageDeleteResponseItem image delete response item.
// swagger:model ImageDeleteResponseItem
type ImageDeleteResponseItem struct {

	// The image ID of an image that was deleted
	Deleted string `json:"Deleted,omitempty"`

	// The image ID of an image that was untagged
	Untagged string `json:"Untagged,omitempty"`
}
@ -0,0 +1,49 @@ |
||||
package types |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
// ImageSummary image summary.
// swagger:model ImageSummary
type ImageSummary struct {

	// containers
	// Required: true
	Containers int64 `json:"Containers"`

	// created
	// Required: true
	Created int64 `json:"Created"`

	// Id
	// Required: true
	ID string `json:"Id"`

	// labels
	// Required: true
	Labels map[string]string `json:"Labels"`

	// parent Id
	// Required: true
	ParentID string `json:"ParentId"`

	// repo digests
	// Required: true
	RepoDigests []string `json:"RepoDigests"`

	// repo tags
	// Required: true
	RepoTags []string `json:"RepoTags"`

	// shared size
	// Required: true
	SharedSize int64 `json:"SharedSize"`

	// size
	// Required: true
	Size int64 `json:"Size"`

	// virtual size
	// Required: true
	VirtualSize int64 `json:"VirtualSize"`
}
@ -0,0 +1,131 @@ |
||||
package mount // import "github.com/docker/docker/api/types/mount"
|
||||
|
||||
import ( |
||||
"os" |
||||
) |
||||
|
||||
// Type represents the type of a mount.
type Type string

// Type constants
const (
	// TypeBind is the type for mounting host dir
	TypeBind Type = "bind"
	// TypeVolume is the type for remote storage volumes
	TypeVolume Type = "volume"
	// TypeTmpfs is the type for mounting tmpfs
	TypeTmpfs Type = "tmpfs"
	// TypeNamedPipe is the type for mounting Windows named pipes
	TypeNamedPipe Type = "npipe"
)
||||
|
||||
// Mount represents a mount (volume).
type Mount struct {
	Type Type `json:",omitempty"`
	// Source specifies the name of the mount. Depending on mount type, this
	// may be a volume name or a host path, or even ignored.
	// Source is not supported for tmpfs (must be an empty value)
	Source string `json:",omitempty"`
	// Target is the path inside the container where the mount appears.
	Target   string `json:",omitempty"`
	ReadOnly bool   `json:",omitempty"`
	// Consistency applies the consistency requirement for the mount.
	Consistency Consistency `json:",omitempty"`

	// At most the options struct matching Type is expected to be set.
	BindOptions   *BindOptions   `json:",omitempty"`
	VolumeOptions *VolumeOptions `json:",omitempty"`
	TmpfsOptions  *TmpfsOptions  `json:",omitempty"`
}
||||
|
||||
// Propagation represents the propagation of a mount.
type Propagation string

const (
	// PropagationRPrivate RPRIVATE
	PropagationRPrivate Propagation = "rprivate"
	// PropagationPrivate PRIVATE
	PropagationPrivate Propagation = "private"
	// PropagationRShared RSHARED
	PropagationRShared Propagation = "rshared"
	// PropagationShared SHARED
	PropagationShared Propagation = "shared"
	// PropagationRSlave RSLAVE
	PropagationRSlave Propagation = "rslave"
	// PropagationSlave SLAVE
	PropagationSlave Propagation = "slave"
)
||||
|
||||
// Propagations is the list of all valid mount propagations.
var Propagations = []Propagation{
	PropagationRPrivate,
	PropagationPrivate,
	PropagationRShared,
	PropagationShared,
	PropagationRSlave,
	PropagationSlave,
}
||||
|
||||
// Consistency represents the consistency requirements of a mount.
type Consistency string

const (
	// ConsistencyFull guarantees bind mount-like consistency
	ConsistencyFull Consistency = "consistent"
	// ConsistencyCached mounts can cache read data and FS structure
	ConsistencyCached Consistency = "cached"
	// ConsistencyDelegated mounts can cache read and written data and structure
	ConsistencyDelegated Consistency = "delegated"
	// ConsistencyDefault provides "consistent" behavior unless overridden
	ConsistencyDefault Consistency = "default"
)
||||
|
||||
// BindOptions defines options specific to mounts of type "bind".
type BindOptions struct {
	Propagation  Propagation `json:",omitempty"`
	NonRecursive bool        `json:",omitempty"`
}
||||
|
||||
// VolumeOptions represents the options for a mount of type volume.
type VolumeOptions struct {
	NoCopy       bool              `json:",omitempty"`
	Labels       map[string]string `json:",omitempty"`
	DriverConfig *Driver           `json:",omitempty"`
}
||||
|
||||
// Driver represents a volume driver.
type Driver struct {
	Name    string            `json:",omitempty"`
	Options map[string]string `json:",omitempty"`
}
||||
|
||||
// TmpfsOptions defines options specific to mounts of type "tmpfs".
type TmpfsOptions struct {
	// Size sets the size of the tmpfs, in bytes.
	//
	// This will be converted to an operating system specific value
	// depending on the host. For example, on linux, it will be converted to
	// use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with
	// docker, uses a straight byte value.
	//
	// Percentages are not supported.
	SizeBytes int64 `json:",omitempty"`
	// Mode of the tmpfs upon creation
	Mode os.FileMode `json:",omitempty"`

	// TODO(stevvooe): There are several more tmpfs flags, specified in the
	// daemon, that are accepted. Only the most basic are added for now.
	//
	// From docker/docker/pkg/mount/flags.go:
	//
	// var validFlags = map[string]bool{
	// 	"":          true,
	// 	"size":      true, X
	// 	"mode":      true, X
	// 	"uid":       true,
	// 	"gid":       true,
	// 	"nr_inodes": true,
	// 	"nr_blocks": true,
	// 	"mpol":      true,
	// }
	//
	// Some of these may be straightforward to add, but others, such as
	// uid/gid have implications in a clustered system.
}
@ -0,0 +1,127 @@ |
||||
package network // import "github.com/docker/docker/api/types/network"
|
||||
import ( |
||||
"github.com/docker/docker/api/types/filters" |
||||
"github.com/docker/docker/errdefs" |
||||
) |
||||
|
||||
// Address represents an IP address.
type Address struct {
	// Addr is the IP address; PrefixLen is the network prefix length.
	Addr      string
	PrefixLen int
}
||||
|
||||
// IPAM represents IP Address Management.
type IPAM struct {
	Driver  string
	Options map[string]string // Per network IPAM driver options
	Config  []IPAMConfig
}
||||
|
||||
// IPAMConfig represents IPAM configurations.
type IPAMConfig struct {
	Subnet     string            `json:",omitempty"`
	IPRange    string            `json:",omitempty"`
	Gateway    string            `json:",omitempty"`
	AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"`
}
||||
|
||||
// EndpointIPAMConfig represents IPAM configurations for the endpoint.
type EndpointIPAMConfig struct {
	IPv4Address  string   `json:",omitempty"`
	IPv6Address  string   `json:",omitempty"`
	LinkLocalIPs []string `json:",omitempty"`
}
||||
|
||||
// Copy makes a copy of the endpoint ipam config
|
||||
func (cfg *EndpointIPAMConfig) Copy() *EndpointIPAMConfig { |
||||
cfgCopy := *cfg |
||||
cfgCopy.LinkLocalIPs = make([]string, 0, len(cfg.LinkLocalIPs)) |
||||
cfgCopy.LinkLocalIPs = append(cfgCopy.LinkLocalIPs, cfg.LinkLocalIPs...) |
||||
return &cfgCopy |
||||
} |
||||
|
||||
// PeerInfo represents one peer of an overlay network.
type PeerInfo struct {
	Name string
	IP   string
}
||||
|
||||
// EndpointSettings stores the network endpoint details.
type EndpointSettings struct {
	// Configurations
	IPAMConfig *EndpointIPAMConfig
	Links      []string
	Aliases    []string
	// Operational data
	NetworkID           string
	EndpointID          string
	Gateway             string
	IPAddress           string
	IPPrefixLen         int
	IPv6Gateway         string
	GlobalIPv6Address   string
	GlobalIPv6PrefixLen int
	MacAddress          string
	DriverOpts          map[string]string
}
||||
|
||||
// Task carries the information about one backend task.
type Task struct {
	Name       string
	EndpointID string
	EndpointIP string
	Info       map[string]string
}
||||
|
||||
// ServiceInfo represents service parameters with the list of service's tasks.
type ServiceInfo struct {
	VIP          string
	Ports        []string
	LocalLBIndex int
	Tasks        []Task
}
||||
|
||||
// Copy makes a deep copy of `EndpointSettings`
|
||||
func (es *EndpointSettings) Copy() *EndpointSettings { |
||||
epCopy := *es |
||||
if es.IPAMConfig != nil { |
||||
epCopy.IPAMConfig = es.IPAMConfig.Copy() |
||||
} |
||||
|
||||
if es.Links != nil { |
||||
links := make([]string, 0, len(es.Links)) |
||||
epCopy.Links = append(links, es.Links...) |
||||
} |
||||
|
||||
if es.Aliases != nil { |
||||
aliases := make([]string, 0, len(es.Aliases)) |
||||
epCopy.Aliases = append(aliases, es.Aliases...) |
||||
} |
||||
return &epCopy |
||||
} |
||||
|
||||
// NetworkingConfig represents the container's networking configuration for each of its interfaces.
// Carries the networking configs specified in the `docker run` and `docker network connect` commands.
type NetworkingConfig struct {
	EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network
}
||||
|
||||
// ConfigReference specifies the source which provides a network's configuration.
type ConfigReference struct {
	// Network is the name of the config-only network providing the configuration.
	Network string
}
||||
|
||||
// acceptedFilters is the set of filter keys allowed for network listing;
// used by ValidateFilters below.
var acceptedFilters = map[string]bool{
	"dangling": true,
	"driver":   true,
	"id":       true,
	"label":    true,
	"name":     true,
	"scope":    true,
	"type":     true,
}
||||
|
||||
// ValidateFilters validates the list of filter args with the available filters.
|
||||
func ValidateFilters(filter filters.Args) error { |
||||
return errdefs.InvalidParameter(filter.Validate(acceptedFilters)) |
||||
} |
@ -0,0 +1,203 @@ |
||||
package types |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
// Plugin A plugin for the Engine API.
// swagger:model Plugin
type Plugin struct {

	// config
	// Required: true
	Config PluginConfig `json:"Config"`

	// True if the plugin is running. False if the plugin is not running, only installed.
	// Required: true
	Enabled bool `json:"Enabled"`

	// Id
	ID string `json:"Id,omitempty"`

	// name
	// Required: true
	Name string `json:"Name"`

	// plugin remote reference used to push/pull the plugin
	PluginReference string `json:"PluginReference,omitempty"`

	// settings
	// Required: true
	Settings PluginSettings `json:"Settings"`
}
||||
|
||||
// PluginConfig The config of a plugin.
// swagger:model PluginConfig
type PluginConfig struct {

	// args
	// Required: true
	Args PluginConfigArgs `json:"Args"`

	// description
	// Required: true
	Description string `json:"Description"`

	// Docker Version used to create the plugin
	DockerVersion string `json:"DockerVersion,omitempty"`

	// documentation
	// Required: true
	Documentation string `json:"Documentation"`

	// entrypoint
	// Required: true
	Entrypoint []string `json:"Entrypoint"`

	// env
	// Required: true
	Env []PluginEnv `json:"Env"`

	// interface
	// Required: true
	Interface PluginConfigInterface `json:"Interface"`

	// ipc host
	// Required: true
	IpcHost bool `json:"IpcHost"`

	// linux
	// Required: true
	Linux PluginConfigLinux `json:"Linux"`

	// mounts
	// Required: true
	Mounts []PluginMount `json:"Mounts"`

	// network
	// Required: true
	Network PluginConfigNetwork `json:"Network"`

	// pid host
	// Required: true
	PidHost bool `json:"PidHost"`

	// propagated mount
	// Required: true
	PropagatedMount string `json:"PropagatedMount"`

	// user
	User PluginConfigUser `json:"User,omitempty"`

	// work dir
	// Required: true
	WorkDir string `json:"WorkDir"`

	// rootfs
	Rootfs *PluginConfigRootfs `json:"rootfs,omitempty"`
}
||||
|
||||
// PluginConfigArgs plugin config args.
// swagger:model PluginConfigArgs
type PluginConfigArgs struct {

	// description
	// Required: true
	Description string `json:"Description"`

	// name
	// Required: true
	Name string `json:"Name"`

	// settable
	// Required: true
	Settable []string `json:"Settable"`

	// value
	// Required: true
	Value []string `json:"Value"`
}
||||
|
||||
// PluginConfigInterface The interface between Docker and the plugin.
// swagger:model PluginConfigInterface
type PluginConfigInterface struct {

	// Protocol to use for clients connecting to the plugin.
	ProtocolScheme string `json:"ProtocolScheme,omitempty"`

	// socket
	// Required: true
	Socket string `json:"Socket"`

	// types
	// Required: true
	Types []PluginInterfaceType `json:"Types"`
}
||||
|
||||
// PluginConfigLinux plugin config linux.
// swagger:model PluginConfigLinux
type PluginConfigLinux struct {

	// allow all devices
	// Required: true
	AllowAllDevices bool `json:"AllowAllDevices"`

	// capabilities
	// Required: true
	Capabilities []string `json:"Capabilities"`

	// devices
	// Required: true
	Devices []PluginDevice `json:"Devices"`
}
||||
|
||||
// PluginConfigNetwork plugin config network.
// swagger:model PluginConfigNetwork
type PluginConfigNetwork struct {

	// type
	// Required: true
	Type string `json:"Type"`
}
||||
|
||||
// PluginConfigRootfs plugin config rootfs.
// swagger:model PluginConfigRootfs
type PluginConfigRootfs struct {

	// diff ids
	DiffIds []string `json:"diff_ids"`

	// type
	Type string `json:"type,omitempty"`
}
||||
|
||||
// PluginConfigUser plugin config user.
// swagger:model PluginConfigUser
type PluginConfigUser struct {

	// GID is the numeric group ID.
	GID uint32 `json:"GID,omitempty"`

	// UID is the numeric user ID.
	UID uint32 `json:"UID,omitempty"`
}
||||
|
||||
// PluginSettings Settings that can be modified by users.
|
||||
// swagger:model PluginSettings
|
||||
type PluginSettings struct { |
||||
|
||||
// args
|
||||
// Required: true
|
||||
Args []string `json:"Args"` |
||||
|
||||
// devices
|
||||
// Required: true
|
||||
Devices []PluginDevice `json:"Devices"` |
||||
|
||||
// env
|
||||
// Required: true
|
||||
Env []string `json:"Env"` |
||||
|
||||
// mounts
|
||||
// Required: true
|
||||
Mounts []PluginMount `json:"Mounts"` |
||||
} |
@ -0,0 +1,25 @@ |
||||
package types |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
// PluginDevice plugin device
//
// A host device exposed to a plugin.
// swagger:model PluginDevice
type PluginDevice struct {

	// description of the device
	// Required: true
	Description string `json:"Description"`

	// name of the device
	// Required: true
	Name string `json:"Name"`

	// path of the device on the host; a pointer so that a missing value
	// can be distinguished from an empty string
	// Required: true
	Path *string `json:"Path"`

	// settable lists which device fields a user may change
	// Required: true
	Settable []string `json:"Settable"`
}
@ -0,0 +1,25 @@ |
||||
package types |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
// PluginEnv plugin env
//
// An environment variable declared by a plugin.
// swagger:model PluginEnv
type PluginEnv struct {

	// description of the variable
	// Required: true
	Description string `json:"Description"`

	// name of the variable
	// Required: true
	Name string `json:"Name"`

	// settable lists which fields a user may change
	// Required: true
	Settable []string `json:"Settable"`

	// value of the variable; a pointer so that an unset value can be
	// distinguished from an empty string
	// Required: true
	Value *string `json:"Value"`
}
@ -0,0 +1,21 @@ |
||||
package types |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
// PluginInterfaceType plugin interface type
//
// Identifies one interface a plugin implements. It has a custom JSON
// representation of the form "prefix.capability/version" (see the
// UnmarshalJSON/MarshalJSON/String methods declared elsewhere in this
// package).
// swagger:model PluginInterfaceType
type PluginInterfaceType struct {

	// capability implemented, e.g. a driver kind
	// Required: true
	Capability string `json:"Capability"`

	// prefix namespacing the capability
	// Required: true
	Prefix string `json:"Prefix"`

	// version of the interface
	// Required: true
	Version string `json:"Version"`
}
@ -0,0 +1,37 @@ |
||||
package types |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
// PluginMount plugin mount
//
// A mount configured for a plugin.
// swagger:model PluginMount
type PluginMount struct {

	// description of the mount
	// Required: true
	Description string `json:"Description"`

	// destination path inside the plugin
	// Required: true
	Destination string `json:"Destination"`

	// name of the mount
	// Required: true
	Name string `json:"Name"`

	// options applied to the mount
	// Required: true
	Options []string `json:"Options"`

	// settable lists which mount fields a user may change
	// Required: true
	Settable []string `json:"Settable"`

	// source path on the host; a pointer so that a missing value can be
	// distinguished from an empty string
	// Required: true
	Source *string `json:"Source"`

	// type of the mount
	// Required: true
	Type string `json:"Type"`
}
@ -0,0 +1,71 @@ |
||||
package types // import "github.com/docker/docker/api/types"
|
||||
|
||||
import ( |
||||
"encoding/json" |
||||
"fmt" |
||||
"sort" |
||||
) |
||||
|
||||
// PluginsListResponse contains the response for the Engine API
// when listing installed plugins.
type PluginsListResponse []*Plugin
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType
|
||||
func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error { |
||||
versionIndex := len(p) |
||||
prefixIndex := 0 |
||||
if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' { |
||||
return fmt.Errorf("%q is not a plugin interface type", p) |
||||
} |
||||
p = p[1 : len(p)-1] |
||||
loop: |
||||
for i, b := range p { |
||||
switch b { |
||||
case '.': |
||||
prefixIndex = i |
||||
case '/': |
||||
versionIndex = i |
||||
break loop |
||||
} |
||||
} |
||||
t.Prefix = string(p[:prefixIndex]) |
||||
t.Capability = string(p[prefixIndex+1 : versionIndex]) |
||||
if versionIndex < len(p) { |
||||
t.Version = string(p[versionIndex+1:]) |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// MarshalJSON implements json.Marshaler for PluginInterfaceType,
// encoding the value as its compact quoted string form (see String).
func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) {
	return json.Marshal(t.String())
}
||||
|
||||
// String implements fmt.Stringer for PluginInterfaceType
|
||||
func (t PluginInterfaceType) String() string { |
||||
return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version) |
||||
} |
||||
|
||||
// PluginPrivilege describes a permission the user has to accept
// upon installing a plugin.
type PluginPrivilege struct {
	// Name of the privilege.
	Name string
	// Description is a human-readable explanation of the privilege.
	Description string
	// Value lists the concrete items granted (semantics depend on the
	// privilege — presumably capability names, device paths, etc.;
	// TODO(review): confirm against the plugin install flow).
	Value []string
}
||||
|
||||
// PluginPrivileges is a list of PluginPrivilege
|
||||
type PluginPrivileges []PluginPrivilege |
||||
|
||||
func (s PluginPrivileges) Len() int { |
||||
return len(s) |
||||
} |
||||
|
||||
func (s PluginPrivileges) Less(i, j int) bool { |
||||
return s[i].Name < s[j].Name |
||||
} |
||||
|
||||
func (s PluginPrivileges) Swap(i, j int) { |
||||
sort.Strings(s[i].Value) |
||||
sort.Strings(s[j].Value) |
||||
s[i], s[j] = s[j], s[i] |
||||
} |
@ -0,0 +1,23 @@ |
||||
package types |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
// Port An open port on a container
// swagger:model Port
type Port struct {

	// Host IP address that the container's port is mapped to
	IP string `json:"IP,omitempty"`

	// Port on the container
	// Required: true
	PrivatePort uint16 `json:"PrivatePort"`

	// Port exposed on the host
	PublicPort uint16 `json:"PublicPort,omitempty"`

	// type of the port mapping, e.g. a transport protocol name
	// Required: true
	Type string `json:"Type"`
}
} |
@ -0,0 +1,21 @@ |
||||
package registry // import "github.com/docker/docker/api/types/registry"
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// DO NOT EDIT THIS FILE
|
||||
// This file was generated by `swagger generate operation`
|
||||
//
|
||||
// See hack/generate-swagger-api.sh
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
// AuthenticateOKBody authenticate o k body
//
// Successful response body of the registry authentication endpoint.
// swagger:model AuthenticateOKBody
type AuthenticateOKBody struct {

	// An opaque token used to authenticate a user after a successful login
	// Required: true
	IdentityToken string `json:"IdentityToken"`

	// The status of the authentication
	// Required: true
	Status string `json:"Status"`
}
} |
@ -0,0 +1,119 @@ |
||||
package registry // import "github.com/docker/docker/api/types/registry"
|
||||
|
||||
import ( |
||||
"encoding/json" |
||||
"net" |
||||
|
||||
"github.com/opencontainers/image-spec/specs-go/v1" |
||||
) |
||||
|
||||
// ServiceConfig stores daemon registry services configuration.
type ServiceConfig struct {
	// CIDRs and hostnames for which non-distributable artifacts may be
	// pushed/pulled.
	AllowNondistributableArtifactsCIDRs     []*NetIPNet
	AllowNondistributableArtifactsHostnames []string
	// InsecureRegistryCIDRs lists address ranges treated as insecure
	// registries.
	InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"`
	// IndexConfigs maps index names to their configuration.
	IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"`
	// Mirrors is the list of registry mirrors.
	Mirrors []string
}
||||
|
||||
// NetIPNet is the net.IPNet type, which can be marshalled and
// unmarshalled to JSON (as a quoted CIDR string).
type NetIPNet net.IPNet

// String returns the CIDR notation of ipnet.
func (ipnet *NetIPNet) String() string {
	return (*net.IPNet)(ipnet).String()
}

// MarshalJSON returns the JSON representation of the IPNet,
// i.e. its CIDR string in quotes.
func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) {
	return json.Marshal(ipnet.String())
}

// UnmarshalJSON sets the IPNet from a byte array of JSON. The value is
// only written on a fully successful decode+parse.
func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) {
	var cidrStr string
	if err = json.Unmarshal(b, &cidrStr); err != nil {
		return err
	}
	var cidr *net.IPNet
	if _, cidr, err = net.ParseCIDR(cidrStr); err != nil {
		return err
	}
	*ipnet = NetIPNet(*cidr)
	return nil
}
||||
|
||||
// IndexInfo contains information about a registry
//
// RepositoryInfo Examples:
//
//	{
//	  "Index" : {
//	    "Name" : "docker.io",
//	    "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"],
//	    "Secure" : true,
//	    "Official" : true,
//	  },
//	  "RemoteName" : "library/debian",
//	  "LocalName" : "debian",
//	  "CanonicalName" : "docker.io/debian"
//	  "Official" : true,
//	}
//
//	{
//	  "Index" : {
//	    "Name" : "127.0.0.1:5000",
//	    "Mirrors" : [],
//	    "Secure" : false,
//	    "Official" : false,
//	  },
//	  "RemoteName" : "user/repo",
//	  "LocalName" : "127.0.0.1:5000/user/repo",
//	  "CanonicalName" : "127.0.0.1:5000/user/repo",
//	  "Official" : false,
//	}
type IndexInfo struct {
	// Name is the name of the registry, such as "docker.io"
	Name string
	// Mirrors is a list of mirrors, expressed as URIs
	Mirrors []string
	// Secure is set to false if the registry is part of the list of
	// insecure registries. Insecure registries accept HTTP and/or accept
	// HTTPS with certificates from unknown CAs.
	Secure bool
	// Official indicates whether this is an official registry
	Official bool
}
||||
|
||||
// SearchResult describes a search result returned from a registry
type SearchResult struct {
	// StarCount indicates the number of stars this repository has
	StarCount int `json:"star_count"`
	// IsOfficial is true if the result is from an official repository.
	IsOfficial bool `json:"is_official"`
	// Name is the name of the repository
	Name string `json:"name"`
	// IsAutomated indicates whether the result is automated
	IsAutomated bool `json:"is_automated"`
	// Description is a textual description of the repository
	Description string `json:"description"`
}

// SearchResults lists a collection of search results returned from a registry
type SearchResults struct {
	// Query contains the query string that generated the search results
	Query string `json:"query"`
	// NumResults indicates the number of results the query returned
	NumResults int `json:"num_results"`
	// Results is a slice containing the actual results for the search
	Results []SearchResult `json:"results"`
}
||||
|
||||
// DistributionInspect describes the result obtained from contacting the
// registry to retrieve image metadata
type DistributionInspect struct {
	// Descriptor contains information about the manifest, including
	// the content addressable digest
	Descriptor v1.Descriptor
	// Platforms contains the list of platforms supported by the image,
	// obtained by parsing the manifest
	Platforms []v1.Platform
}
@ -0,0 +1,94 @@ |
||||
package types // import "github.com/docker/docker/api/types"
|
||||
|
||||
// Seccomp represents the config for a seccomp profile for syscall restriction.
type Seccomp struct {
	// DefaultAction is applied to any syscall not matched by Syscalls.
	DefaultAction Action `json:"defaultAction"`
	// Architectures is kept to maintain backward compatibility with the old
	// seccomp profile.
	Architectures []Arch `json:"architectures,omitempty"`
	// ArchMap maps architectures to their sub-architectures.
	ArchMap []Architecture `json:"archMap,omitempty"`
	// Syscalls holds the per-syscall rules of the profile.
	Syscalls []*Syscall `json:"syscalls"`
}

// Architecture is used to represent a specific architecture
// and its sub-architectures
type Architecture struct {
	Arch      Arch   `json:"architecture"`
	SubArches []Arch `json:"subArchitectures"`
}

// Arch used for architectures
type Arch string

// Additional architectures permitted to be used for system calls
// By default only the native architecture of the kernel is permitted
const (
	ArchX86         Arch = "SCMP_ARCH_X86"
	ArchX86_64      Arch = "SCMP_ARCH_X86_64"
	ArchX32         Arch = "SCMP_ARCH_X32"
	ArchARM         Arch = "SCMP_ARCH_ARM"
	ArchAARCH64     Arch = "SCMP_ARCH_AARCH64"
	ArchMIPS        Arch = "SCMP_ARCH_MIPS"
	ArchMIPS64      Arch = "SCMP_ARCH_MIPS64"
	ArchMIPS64N32   Arch = "SCMP_ARCH_MIPS64N32"
	ArchMIPSEL      Arch = "SCMP_ARCH_MIPSEL"
	ArchMIPSEL64    Arch = "SCMP_ARCH_MIPSEL64"
	ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32"
	ArchPPC         Arch = "SCMP_ARCH_PPC"
	ArchPPC64       Arch = "SCMP_ARCH_PPC64"
	ArchPPC64LE     Arch = "SCMP_ARCH_PPC64LE"
	ArchS390        Arch = "SCMP_ARCH_S390"
	ArchS390X       Arch = "SCMP_ARCH_S390X"
)
||||
|
||||
// Action taken upon Seccomp rule match
type Action string

// Define actions for Seccomp rules
const (
	ActKill  Action = "SCMP_ACT_KILL"
	ActTrap  Action = "SCMP_ACT_TRAP"
	ActErrno Action = "SCMP_ACT_ERRNO"
	ActTrace Action = "SCMP_ACT_TRACE"
	ActAllow Action = "SCMP_ACT_ALLOW"
)

// Operator used to match syscall arguments in Seccomp
type Operator string

// Define operators for syscall arguments in Seccomp
const (
	OpNotEqual     Operator = "SCMP_CMP_NE"
	OpLessThan     Operator = "SCMP_CMP_LT"
	OpLessEqual    Operator = "SCMP_CMP_LE"
	OpEqualTo      Operator = "SCMP_CMP_EQ"
	OpGreaterEqual Operator = "SCMP_CMP_GE"
	OpGreaterThan  Operator = "SCMP_CMP_GT"
	OpMaskedEqual  Operator = "SCMP_CMP_MASKED_EQ"
)
||||
|
||||
// Arg used for matching specific syscall arguments in Seccomp
type Arg struct {
	// Index of the syscall argument being matched.
	Index uint `json:"index"`
	// Value (and ValueTwo, for two-operand operators) compared against
	// the argument using Op.
	Value    uint64   `json:"value"`
	ValueTwo uint64   `json:"valueTwo"`
	Op       Operator `json:"op"`
}

// Filter is used to conditionally apply Seccomp rules
type Filter struct {
	// Caps lists capabilities the condition matches on.
	Caps []string `json:"caps,omitempty"`
	// Arches lists architectures the condition matches on.
	Arches []string `json:"arches,omitempty"`
	// MinKernel is a minimum kernel version for the condition.
	MinKernel string `json:"minKernel,omitempty"`
}

// Syscall is used to match a group of syscalls in Seccomp
type Syscall struct {
	// Name matches a single syscall; Names matches several.
	Name   string   `json:"name,omitempty"`
	Names  []string `json:"names,omitempty"`
	Action Action   `json:"action"`
	Args   []*Arg   `json:"args"`
	// Comment is free-form documentation carried in the profile.
	Comment string `json:"comment"`
	// Includes/Excludes conditionally enable or disable this rule.
	Includes Filter `json:"includes"`
	Excludes Filter `json:"excludes"`
}
@ -0,0 +1,12 @@ |
||||
package types |
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
// ServiceUpdateResponse service update response
// swagger:model ServiceUpdateResponse
type ServiceUpdateResponse struct {

	// Optional warning messages
	Warnings []string `json:"Warnings"`
}
@ -0,0 +1,181 @@ |
||||
// Package types is used for API stability in the types and response to the
|
||||
// consumers of the API stats endpoint.
|
||||
package types // import "github.com/docker/docker/api/types"
|
||||
|
||||
import "time" |
||||
|
||||
// ThrottlingData stores CPU throttling stats of one running container.
// Not used on Windows.
type ThrottlingData struct {
	// Number of periods with throttling active
	Periods uint64 `json:"periods"`
	// Number of periods when the container hits its throttling limit.
	ThrottledPeriods uint64 `json:"throttled_periods"`
	// Aggregate time the container was throttled for in nanoseconds.
	ThrottledTime uint64 `json:"throttled_time"`
}

// CPUUsage stores All CPU stats aggregated since container inception.
type CPUUsage struct {
	// Total CPU time consumed.
	// Units: nanoseconds (Linux)
	// Units: 100's of nanoseconds (Windows)
	TotalUsage uint64 `json:"total_usage"`

	// Total CPU time consumed per core (Linux). Not used on Windows.
	// Units: nanoseconds.
	PercpuUsage []uint64 `json:"percpu_usage,omitempty"`

	// Time spent by tasks of the cgroup in kernel mode (Linux).
	// Time spent by all container processes in kernel mode (Windows).
	// Units: nanoseconds (Linux).
	// Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers.
	UsageInKernelmode uint64 `json:"usage_in_kernelmode"`

	// Time spent by tasks of the cgroup in user mode (Linux).
	// Time spent by all container processes in user mode (Windows).
	// Units: nanoseconds (Linux).
	// Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers
	UsageInUsermode uint64 `json:"usage_in_usermode"`
}

// CPUStats aggregates and wraps all CPU related info of container
type CPUStats struct {
	// CPU Usage. Linux and Windows.
	CPUUsage CPUUsage `json:"cpu_usage"`

	// System Usage. Linux only.
	SystemUsage uint64 `json:"system_cpu_usage,omitempty"`

	// Online CPUs. Linux only.
	OnlineCPUs uint32 `json:"online_cpus,omitempty"`

	// Throttling Data. Linux only.
	ThrottlingData ThrottlingData `json:"throttling_data,omitempty"`
}
||||
|
||||
// MemoryStats aggregates all memory stats since container inception on Linux.
// Windows returns stats for commit and private working set only.
type MemoryStats struct {
	// Linux Memory Stats

	// current res_counter usage for memory
	Usage uint64 `json:"usage,omitempty"`
	// maximum usage ever recorded.
	MaxUsage uint64 `json:"max_usage,omitempty"`
	// TODO(vishh): Export these as stronger types.
	// all the stats exported via memory.stat.
	Stats map[string]uint64 `json:"stats,omitempty"`
	// number of times memory usage hits limits.
	Failcnt uint64 `json:"failcnt,omitempty"`
	// memory limit in effect for the container.
	Limit uint64 `json:"limit,omitempty"`

	// Windows Memory Stats
	// See https://technet.microsoft.com/en-us/magazine/ff382715.aspx

	// committed bytes
	Commit uint64 `json:"commitbytes,omitempty"`
	// peak committed bytes
	CommitPeak uint64 `json:"commitpeakbytes,omitempty"`
	// private working set
	PrivateWorkingSet uint64 `json:"privateworkingset,omitempty"`
}
||||
|
||||
// BlkioStatEntry is one small entity to store a piece of Blkio stats
// Not used on Windows.
type BlkioStatEntry struct {
	// Major/Minor identify the block device this entry refers to.
	Major uint64 `json:"major"`
	Minor uint64 `json:"minor"`
	// Op is the operation this entry counts (e.g. read or write).
	Op string `json:"op"`
	// Value is the counter value for the operation.
	Value uint64 `json:"value"`
}

// BlkioStats stores All IO service stats for data read and write.
// This is a Linux specific structure as the differences between expressing
// block I/O on Windows and Linux are sufficiently significant to make
// little sense attempting to morph into a combined structure.
type BlkioStats struct {
	// number of bytes transferred to and from the block device
	IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"`
	IoServicedRecursive     []BlkioStatEntry `json:"io_serviced_recursive"`
	IoQueuedRecursive       []BlkioStatEntry `json:"io_queue_recursive"`
	IoServiceTimeRecursive  []BlkioStatEntry `json:"io_service_time_recursive"`
	IoWaitTimeRecursive     []BlkioStatEntry `json:"io_wait_time_recursive"`
	IoMergedRecursive       []BlkioStatEntry `json:"io_merged_recursive"`
	IoTimeRecursive         []BlkioStatEntry `json:"io_time_recursive"`
	SectorsRecursive        []BlkioStatEntry `json:"sectors_recursive"`
}

// StorageStats is the disk I/O stats for read/write on Windows.
type StorageStats struct {
	ReadCountNormalized  uint64 `json:"read_count_normalized,omitempty"`
	ReadSizeBytes        uint64 `json:"read_size_bytes,omitempty"`
	WriteCountNormalized uint64 `json:"write_count_normalized,omitempty"`
	WriteSizeBytes       uint64 `json:"write_size_bytes,omitempty"`
}
||||
|
||||
// NetworkStats aggregates the network stats of one container
type NetworkStats struct {
	// Bytes received. Windows and Linux.
	RxBytes uint64 `json:"rx_bytes"`
	// Packets received. Windows and Linux.
	RxPackets uint64 `json:"rx_packets"`
	// Received errors. Not used on Windows. Note that we don't `omitempty` this
	// field as it is expected in the >=v1.21 API stats structure.
	RxErrors uint64 `json:"rx_errors"`
	// Incoming packets dropped. Windows and Linux.
	RxDropped uint64 `json:"rx_dropped"`
	// Bytes sent. Windows and Linux.
	TxBytes uint64 `json:"tx_bytes"`
	// Packets sent. Windows and Linux.
	TxPackets uint64 `json:"tx_packets"`
	// Sent errors. Not used on Windows. Note that we don't `omitempty` this
	// field as it is expected in the >=v1.21 API stats structure.
	TxErrors uint64 `json:"tx_errors"`
	// Outgoing packets dropped. Windows and Linux.
	TxDropped uint64 `json:"tx_dropped"`
	// Endpoint ID. Not used on Linux.
	EndpointID string `json:"endpoint_id,omitempty"`
	// Instance ID. Not used on Linux.
	InstanceID string `json:"instance_id,omitempty"`
}

// PidsStats contains the stats of a container's pids
type PidsStats struct {
	// Current is the number of pids in the cgroup
	Current uint64 `json:"current,omitempty"`
	// Limit is the hard limit on the number of pids in the cgroup.
	// A "Limit" of 0 means that there is no limit.
	Limit uint64 `json:"limit,omitempty"`
}
||||
|
||||
// Stats is Ultimate struct aggregating all types of stats of one container
type Stats struct {
	// Common stats
	Read    time.Time `json:"read"`
	PreRead time.Time `json:"preread"`

	// Linux specific stats, not populated on Windows.
	PidsStats  PidsStats  `json:"pids_stats,omitempty"`
	BlkioStats BlkioStats `json:"blkio_stats,omitempty"`

	// Windows specific stats, not populated on Linux.
	NumProcs     uint32       `json:"num_procs"`
	StorageStats StorageStats `json:"storage_stats,omitempty"`

	// Shared stats
	CPUStats    CPUStats    `json:"cpu_stats,omitempty"`
	PreCPUStats CPUStats    `json:"precpu_stats,omitempty"` // "Pre"="Previous"
	MemoryStats MemoryStats `json:"memory_stats,omitempty"`
}

// StatsJSON is newly used Networks: it extends Stats with the container's
// identity and its per-network statistics.
type StatsJSON struct {
	Stats

	Name string `json:"name,omitempty"`
	ID   string `json:"id,omitempty"`

	// Networks request version >=1.21
	Networks map[string]NetworkStats `json:"networks,omitempty"`
}
@ -0,0 +1,30 @@ |
||||
package strslice // import "github.com/docker/docker/api/types/strslice"
|
||||
|
||||
import "encoding/json" |
||||
|
||||
// StrSlice represents a string or an array of strings.
|
||||
// We need to override the json decoder to accept both options.
|
||||
type StrSlice []string |
||||
|
||||
// UnmarshalJSON decodes the byte slice whether it's a string or an array of
|
||||
// strings. This method is needed to implement json.Unmarshaler.
|
||||
func (e *StrSlice) UnmarshalJSON(b []byte) error { |
||||
if len(b) == 0 { |
||||
// With no input, we preserve the existing value by returning nil and
|
||||
// leaving the target alone. This allows defining default values for
|
||||
// the type.
|
||||
return nil |
||||
} |
||||
|
||||
p := make([]string, 0, 1) |
||||
if err := json.Unmarshal(b, &p); err != nil { |
||||
var s string |
||||
if err := json.Unmarshal(b, &s); err != nil { |
||||
return err |
||||
} |
||||
p = append(p, s) |
||||
} |
||||
|
||||
*e = p |
||||
return nil |
||||
} |
@ -0,0 +1,40 @@ |
||||
package swarm // import "github.com/docker/docker/api/types/swarm"
|
||||
|
||||
import "time" |
||||
|
||||
// Version represents the internal object version.
type Version struct {
	Index uint64 `json:",omitempty"`
}

// Meta is a base object inherited by most of the other once.
type Meta struct {
	// Version is the object's internal version (used for optimistic
	// concurrency — TODO(review): confirm against the swarm API).
	Version   Version   `json:",omitempty"`
	CreatedAt time.Time `json:",omitempty"`
	UpdatedAt time.Time `json:",omitempty"`
}

// Annotations represents how to describe an object.
type Annotations struct {
	Name   string            `json:",omitempty"`
	Labels map[string]string `json:"Labels"`
}

// Driver represents a driver (network, logging, secrets backend).
type Driver struct {
	Name    string            `json:",omitempty"`
	Options map[string]string `json:",omitempty"`
}

// TLSInfo represents the TLS information about what CA certificate is trusted,
// and who the issuer for a TLS certificate is
type TLSInfo struct {
	// TrustRoot is the trusted CA root certificate in PEM format
	TrustRoot string `json:",omitempty"`

	// CertIssuer is the raw subject bytes of the issuer
	CertIssuerSubject []byte `json:",omitempty"`

	// CertIssuerPublicKey is the raw public key bytes of the issuer
	CertIssuerPublicKey []byte `json:",omitempty"`
}
@ -0,0 +1,40 @@ |
||||
package swarm // import "github.com/docker/docker/api/types/swarm"
|
||||
|
||||
import "os" |
||||
|
||||
// Config represents a config.
type Config struct {
	// ID is the config's unique identifier in the swarm.
	ID string
	Meta
	Spec ConfigSpec
}

// ConfigSpec represents a config specification from a config in swarm
type ConfigSpec struct {
	Annotations
	// Data is the raw config payload.
	Data []byte `json:",omitempty"`

	// Templating controls whether and how to evaluate the config payload as
	// a template. If it is not set, no templating is used.
	Templating *Driver `json:",omitempty"`
}

// ConfigReferenceFileTarget is a file target in a config reference
type ConfigReferenceFileTarget struct {
	// Name is the target file name; UID/GID/Mode describe its ownership
	// and permissions.
	Name string
	UID  string
	GID  string
	Mode os.FileMode
}

// ConfigReferenceRuntimeTarget is a target for a config specifying that it
// isn't mounted into the container but instead has some other purpose.
type ConfigReferenceRuntimeTarget struct{}

// ConfigReference is a reference to a config in swarm
type ConfigReference struct {
	// Exactly one of File or Runtime describes how the config is used —
	// presumably mutually exclusive; TODO(review): confirm with callers.
	File       *ConfigReferenceFileTarget    `json:",omitempty"`
	Runtime    *ConfigReferenceRuntimeTarget `json:",omitempty"`
	ConfigID   string
	ConfigName string
}
@ -0,0 +1,77 @@ |
||||
package swarm // import "github.com/docker/docker/api/types/swarm"
|
||||
|
||||
import ( |
||||
"time" |
||||
|
||||
"github.com/docker/docker/api/types/container" |
||||
"github.com/docker/docker/api/types/mount" |
||||
) |
||||
|
||||
// DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf)
// Detailed documentation is available in:
// http://man7.org/linux/man-pages/man5/resolv.conf.5.html
// `nameserver`, `search`, `options` have been supported.
// TODO: `domain` is not supported yet.
type DNSConfig struct {
	// Nameservers specifies the IP addresses of the name servers
	Nameservers []string `json:",omitempty"`
	// Search specifies the search list for host-name lookup
	Search []string `json:",omitempty"`
	// Options allows certain internal resolver variables to be modified
	Options []string `json:",omitempty"`
}

// SELinuxContext contains the SELinux labels of the container.
type SELinuxContext struct {
	// Disable turns off SELinux labeling for the container.
	Disable bool

	User  string
	Role  string
	Type  string
	Level string
}

// CredentialSpec for managed service account (Windows only)
type CredentialSpec struct {
	Config   string
	File     string
	Registry string
}

// Privileges defines the security options for the container.
type Privileges struct {
	CredentialSpec *CredentialSpec
	SELinuxContext *SELinuxContext
}

// ContainerSpec represents the spec of a container.
type ContainerSpec struct {
	Image           string                  `json:",omitempty"`
	Labels          map[string]string       `json:",omitempty"`
	Command         []string                `json:",omitempty"`
	Args            []string                `json:",omitempty"`
	Hostname        string                  `json:",omitempty"`
	Env             []string                `json:",omitempty"`
	Dir             string                  `json:",omitempty"`
	User            string                  `json:",omitempty"`
	Groups          []string                `json:",omitempty"`
	Privileges      *Privileges             `json:",omitempty"`
	Init            *bool                   `json:",omitempty"`
	StopSignal      string                  `json:",omitempty"`
	TTY             bool                    `json:",omitempty"`
	OpenStdin       bool                    `json:",omitempty"`
	ReadOnly        bool                    `json:",omitempty"`
	Mounts          []mount.Mount           `json:",omitempty"`
	StopGracePeriod *time.Duration          `json:",omitempty"`
	Healthcheck     *container.HealthConfig `json:",omitempty"`
	// The format of extra hosts on swarmkit is specified in:
	// http://man7.org/linux/man-pages/man5/hosts.5.html
	// IP_address canonical_hostname [aliases...]
	Hosts        []string            `json:",omitempty"`
	DNSConfig    *DNSConfig          `json:",omitempty"`
	Secrets      []*SecretReference  `json:",omitempty"`
	Configs      []*ConfigReference  `json:",omitempty"`
	Isolation    container.Isolation `json:",omitempty"`
	Sysctls      map[string]string   `json:",omitempty"`
	Capabilities []string            `json:",omitempty"`
}
@ -0,0 +1,121 @@ |
||||
package swarm // import "github.com/docker/docker/api/types/swarm"
|
||||
|
||||
import ( |
||||
"github.com/docker/docker/api/types/network" |
||||
) |
||||
|
||||
// Endpoint represents an endpoint.
type Endpoint struct {
	Spec       EndpointSpec        `json:",omitempty"`
	Ports      []PortConfig        `json:",omitempty"`
	VirtualIPs []EndpointVirtualIP `json:",omitempty"`
}

// EndpointSpec represents the spec of an endpoint.
type EndpointSpec struct {
	Mode  ResolutionMode `json:",omitempty"`
	Ports []PortConfig   `json:",omitempty"`
}

// ResolutionMode represents a resolution mode.
type ResolutionMode string

const (
	// ResolutionModeVIP VIP
	ResolutionModeVIP ResolutionMode = "vip"
	// ResolutionModeDNSRR DNSRR
	ResolutionModeDNSRR ResolutionMode = "dnsrr"
)

// PortConfig represents the config of a port.
type PortConfig struct {
	Name     string             `json:",omitempty"`
	Protocol PortConfigProtocol `json:",omitempty"`
	// TargetPort is the port inside the container
	TargetPort uint32 `json:",omitempty"`
	// PublishedPort is the port on the swarm hosts
	PublishedPort uint32 `json:",omitempty"`
	// PublishMode is the mode in which port is published
	PublishMode PortConfigPublishMode `json:",omitempty"`
}

// PortConfigPublishMode represents the mode in which the port is to
// be published.
type PortConfigPublishMode string

const (
	// PortConfigPublishModeIngress is used for ports published
	// for ingress load balancing using routing mesh.
	PortConfigPublishModeIngress PortConfigPublishMode = "ingress"
	// PortConfigPublishModeHost is used for ports published
	// for direct host level access on the host where the task is running.
	PortConfigPublishModeHost PortConfigPublishMode = "host"
)

// PortConfigProtocol represents the protocol of a port.
type PortConfigProtocol string

const (
	// TODO(stevvooe): These should be used generally, not just for PortConfig.

	// PortConfigProtocolTCP TCP
	PortConfigProtocolTCP PortConfigProtocol = "tcp"
	// PortConfigProtocolUDP UDP
	PortConfigProtocolUDP PortConfigProtocol = "udp"
	// PortConfigProtocolSCTP SCTP
	PortConfigProtocolSCTP PortConfigProtocol = "sctp"
)

// EndpointVirtualIP represents the virtual ip of a port.
type EndpointVirtualIP struct {
	NetworkID string `json:",omitempty"`
	Addr      string `json:",omitempty"`
}

// Network represents a network.
type Network struct {
	// ID is the network's unique identifier in the swarm.
	ID string
	Meta
	Spec        NetworkSpec  `json:",omitempty"`
	DriverState Driver       `json:",omitempty"`
	IPAMOptions *IPAMOptions `json:",omitempty"`
}
||||
|
||||
// NetworkSpec represents the spec of a network.
|
||||
type NetworkSpec struct { |
||||
Annotations |
||||
DriverConfiguration *Driver `json:",omitempty"` |
||||
IPv6Enabled bool `json:",omitempty"` |
||||
Internal bool `json:",omitempty"` |
||||
Attachable bool `json:",omitempty"` |
||||
Ingress bool `json:",omitempty"` |
||||
IPAMOptions *IPAMOptions `json:",omitempty"` |
||||
ConfigFrom *network.ConfigReference `json:",omitempty"` |
||||
Scope string `json:",omitempty"` |
||||
} |
||||
|
||||
// NetworkAttachmentConfig represents the configuration of a network attachment.
|
||||
type NetworkAttachmentConfig struct { |
||||
Target string `json:",omitempty"` |
||||
Aliases []string `json:",omitempty"` |
||||
DriverOpts map[string]string `json:",omitempty"` |
||||
} |
||||
|
||||
// NetworkAttachment represents a network attachment.
|
||||
type NetworkAttachment struct { |
||||
Network Network `json:",omitempty"` |
||||
Addresses []string `json:",omitempty"` |
||||
} |
||||
|
||||
// IPAMOptions represents ipam options.
|
||||
type IPAMOptions struct { |
||||
Driver Driver `json:",omitempty"` |
||||
Configs []IPAMConfig `json:",omitempty"` |
||||
} |
||||
|
||||
// IPAMConfig represents ipam configuration.
|
||||
type IPAMConfig struct { |
||||
Subnet string `json:",omitempty"` |
||||
Range string `json:",omitempty"` |
||||
Gateway string `json:",omitempty"` |
||||
} |
@ -0,0 +1,115 @@ |
||||
package swarm // import "github.com/docker/docker/api/types/swarm"
|
||||
|
||||
// Node represents a node.
|
||||
type Node struct { |
||||
ID string |
||||
Meta |
||||
// Spec defines the desired state of the node as specified by the user.
|
||||
// The system will honor this and will *never* modify it.
|
||||
Spec NodeSpec `json:",omitempty"` |
||||
// Description encapsulates the properties of the Node as reported by the
|
||||
// agent.
|
||||
Description NodeDescription `json:",omitempty"` |
||||
// Status provides the current status of the node, as seen by the manager.
|
||||
Status NodeStatus `json:",omitempty"` |
||||
// ManagerStatus provides the current status of the node's manager
|
||||
// component, if the node is a manager.
|
||||
ManagerStatus *ManagerStatus `json:",omitempty"` |
||||
} |
||||
|
||||
// NodeSpec represents the spec of a node.
|
||||
type NodeSpec struct { |
||||
Annotations |
||||
Role NodeRole `json:",omitempty"` |
||||
Availability NodeAvailability `json:",omitempty"` |
||||
} |
||||
|
||||
// NodeRole represents the role of a node.
|
||||
type NodeRole string |
||||
|
||||
const ( |
||||
// NodeRoleWorker WORKER
|
||||
NodeRoleWorker NodeRole = "worker" |
||||
// NodeRoleManager MANAGER
|
||||
NodeRoleManager NodeRole = "manager" |
||||
) |
||||
|
||||
// NodeAvailability represents the availability of a node.
|
||||
type NodeAvailability string |
||||
|
||||
const ( |
||||
// NodeAvailabilityActive ACTIVE
|
||||
NodeAvailabilityActive NodeAvailability = "active" |
||||
// NodeAvailabilityPause PAUSE
|
||||
NodeAvailabilityPause NodeAvailability = "pause" |
||||
// NodeAvailabilityDrain DRAIN
|
||||
NodeAvailabilityDrain NodeAvailability = "drain" |
||||
) |
||||
|
||||
// NodeDescription represents the description of a node.
|
||||
type NodeDescription struct { |
||||
Hostname string `json:",omitempty"` |
||||
Platform Platform `json:",omitempty"` |
||||
Resources Resources `json:",omitempty"` |
||||
Engine EngineDescription `json:",omitempty"` |
||||
TLSInfo TLSInfo `json:",omitempty"` |
||||
} |
||||
|
||||
// Platform represents the platform (Arch/OS).
|
||||
type Platform struct { |
||||
Architecture string `json:",omitempty"` |
||||
OS string `json:",omitempty"` |
||||
} |
||||
|
||||
// EngineDescription represents the description of an engine.
|
||||
type EngineDescription struct { |
||||
EngineVersion string `json:",omitempty"` |
||||
Labels map[string]string `json:",omitempty"` |
||||
Plugins []PluginDescription `json:",omitempty"` |
||||
} |
||||
|
||||
// PluginDescription represents the description of an engine plugin.
|
||||
type PluginDescription struct { |
||||
Type string `json:",omitempty"` |
||||
Name string `json:",omitempty"` |
||||
} |
||||
|
||||
// NodeStatus represents the status of a node.
|
||||
type NodeStatus struct { |
||||
State NodeState `json:",omitempty"` |
||||
Message string `json:",omitempty"` |
||||
Addr string `json:",omitempty"` |
||||
} |
||||
|
||||
// Reachability represents the reachability of a node.
|
||||
type Reachability string |
||||
|
||||
const ( |
||||
// ReachabilityUnknown UNKNOWN
|
||||
ReachabilityUnknown Reachability = "unknown" |
||||
// ReachabilityUnreachable UNREACHABLE
|
||||
ReachabilityUnreachable Reachability = "unreachable" |
||||
// ReachabilityReachable REACHABLE
|
||||
ReachabilityReachable Reachability = "reachable" |
||||
) |
||||
|
||||
// ManagerStatus represents the status of a manager.
|
||||
type ManagerStatus struct { |
||||
Leader bool `json:",omitempty"` |
||||
Reachability Reachability `json:",omitempty"` |
||||
Addr string `json:",omitempty"` |
||||
} |
||||
|
||||
// NodeState represents the state of a node.
|
||||
type NodeState string |
||||
|
||||
const ( |
||||
// NodeStateUnknown UNKNOWN
|
||||
NodeStateUnknown NodeState = "unknown" |
||||
// NodeStateDown DOWN
|
||||
NodeStateDown NodeState = "down" |
||||
// NodeStateReady READY
|
||||
NodeStateReady NodeState = "ready" |
||||
// NodeStateDisconnected DISCONNECTED
|
||||
NodeStateDisconnected NodeState = "disconnected" |
||||
) |
@ -0,0 +1,27 @@ |
||||
package swarm // import "github.com/docker/docker/api/types/swarm"
|
||||
|
||||
// RuntimeType is the type of runtime used for the TaskSpec
|
||||
type RuntimeType string |
||||
|
||||
// RuntimeURL is the proto type url
|
||||
type RuntimeURL string |
||||
|
||||
const ( |
||||
// RuntimeContainer is the container based runtime
|
||||
RuntimeContainer RuntimeType = "container" |
||||
// RuntimePlugin is the plugin based runtime
|
||||
RuntimePlugin RuntimeType = "plugin" |
||||
// RuntimeNetworkAttachment is the network attachment runtime
|
||||
RuntimeNetworkAttachment RuntimeType = "attachment" |
||||
|
||||
// RuntimeURLContainer is the proto url for the container type
|
||||
RuntimeURLContainer RuntimeURL = "types.docker.com/RuntimeContainer" |
||||
// RuntimeURLPlugin is the proto url for the plugin type
|
||||
RuntimeURLPlugin RuntimeURL = "types.docker.com/RuntimePlugin" |
||||
) |
||||
|
||||
// NetworkAttachmentSpec represents the runtime spec type for network
|
||||
// attachment tasks
|
||||
type NetworkAttachmentSpec struct { |
||||
ContainerID string |
||||
} |
@ -0,0 +1,3 @@ |
||||
//go:generate protoc -I . --gogofast_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. plugin.proto
|
||||
|
||||
package runtime // import "github.com/docker/docker/api/types/swarm/runtime"
|
@ -0,0 +1,754 @@ |
||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: plugin.proto
|
||||
|
||||
/* |
||||
Package runtime is a generated protocol buffer package. |
||||
|
||||
It is generated from these files: |
||||
plugin.proto |
||||
|
||||
It has these top-level messages: |
||||
PluginSpec |
||||
PluginPrivilege |
||||
*/ |
||||
package runtime |
||||
|
||||
import proto "github.com/gogo/protobuf/proto" |
||||
import fmt "fmt" |
||||
import math "math" |
||||
|
||||
import io "io" |
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal |
||||
var _ = fmt.Errorf |
||||
var _ = math.Inf |
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
// PluginSpec defines the base payload which clients can specify for creating
|
||||
// a service with the plugin runtime.
|
||||
type PluginSpec struct { |
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` |
||||
Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"` |
||||
Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges" json:"privileges,omitempty"` |
||||
Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"` |
||||
Env []string `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"` |
||||
} |
||||
|
||||
func (m *PluginSpec) Reset() { *m = PluginSpec{} } |
||||
func (m *PluginSpec) String() string { return proto.CompactTextString(m) } |
||||
func (*PluginSpec) ProtoMessage() {} |
||||
func (*PluginSpec) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{0} } |
||||
|
||||
func (m *PluginSpec) GetName() string { |
||||
if m != nil { |
||||
return m.Name |
||||
} |
||||
return "" |
||||
} |
||||
|
||||
func (m *PluginSpec) GetRemote() string { |
||||
if m != nil { |
||||
return m.Remote |
||||
} |
||||
return "" |
||||
} |
||||
|
||||
func (m *PluginSpec) GetPrivileges() []*PluginPrivilege { |
||||
if m != nil { |
||||
return m.Privileges |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
func (m *PluginSpec) GetDisabled() bool { |
||||
if m != nil { |
||||
return m.Disabled |
||||
} |
||||
return false |
||||
} |
||||
|
||||
func (m *PluginSpec) GetEnv() []string { |
||||
if m != nil { |
||||
return m.Env |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
// PluginPrivilege describes a permission the user has to accept
|
||||
// upon installing a plugin.
|
||||
type PluginPrivilege struct { |
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` |
||||
Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` |
||||
Value []string `protobuf:"bytes,3,rep,name=value" json:"value,omitempty"` |
||||
} |
||||
|
||||
func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} } |
||||
func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) } |
||||
func (*PluginPrivilege) ProtoMessage() {} |
||||
func (*PluginPrivilege) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{1} } |
||||
|
||||
func (m *PluginPrivilege) GetName() string { |
||||
if m != nil { |
||||
return m.Name |
||||
} |
||||
return "" |
||||
} |
||||
|
||||
func (m *PluginPrivilege) GetDescription() string { |
||||
if m != nil { |
||||
return m.Description |
||||
} |
||||
return "" |
||||
} |
||||
|
||||
func (m *PluginPrivilege) GetValue() []string { |
||||
if m != nil { |
||||
return m.Value |
||||
} |
||||
return nil |
||||
} |
||||
|
||||
func init() { |
||||
proto.RegisterType((*PluginSpec)(nil), "PluginSpec") |
||||
proto.RegisterType((*PluginPrivilege)(nil), "PluginPrivilege") |
||||
} |
||||
func (m *PluginSpec) Marshal() (dAtA []byte, err error) { |
||||
size := m.Size() |
||||
dAtA = make([]byte, size) |
||||
n, err := m.MarshalTo(dAtA) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return dAtA[:n], nil |
||||
} |
||||
|
||||
func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) { |
||||
var i int |
||||
_ = i |
||||
var l int |
||||
_ = l |
||||
if len(m.Name) > 0 { |
||||
dAtA[i] = 0xa |
||||
i++ |
||||
i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) |
||||
i += copy(dAtA[i:], m.Name) |
||||
} |
||||
if len(m.Remote) > 0 { |
||||
dAtA[i] = 0x12 |
||||
i++ |
||||
i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote))) |
||||
i += copy(dAtA[i:], m.Remote) |
||||
} |
||||
if len(m.Privileges) > 0 { |
||||
for _, msg := range m.Privileges { |
||||
dAtA[i] = 0x1a |
||||
i++ |
||||
i = encodeVarintPlugin(dAtA, i, uint64(msg.Size())) |
||||
n, err := msg.MarshalTo(dAtA[i:]) |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
i += n |
||||
} |
||||
} |
||||
if m.Disabled { |
||||
dAtA[i] = 0x20 |
||||
i++ |
||||
if m.Disabled { |
||||
dAtA[i] = 1 |
||||
} else { |
||||
dAtA[i] = 0 |
||||
} |
||||
i++ |
||||
} |
||||
if len(m.Env) > 0 { |
||||
for _, s := range m.Env { |
||||
dAtA[i] = 0x2a |
||||
i++ |
||||
l = len(s) |
||||
for l >= 1<<7 { |
||||
dAtA[i] = uint8(uint64(l)&0x7f | 0x80) |
||||
l >>= 7 |
||||
i++ |
||||
} |
||||
dAtA[i] = uint8(l) |
||||
i++ |
||||
i += copy(dAtA[i:], s) |
||||
} |
||||
} |
||||
return i, nil |
||||
} |
||||
|
||||
func (m *PluginPrivilege) Marshal() (dAtA []byte, err error) { |
||||
size := m.Size() |
||||
dAtA = make([]byte, size) |
||||
n, err := m.MarshalTo(dAtA) |
||||
if err != nil { |
||||
return nil, err |
||||
} |
||||
return dAtA[:n], nil |
||||
} |
||||
|
||||
func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) { |
||||
var i int |
||||
_ = i |
||||
var l int |
||||
_ = l |
||||
if len(m.Name) > 0 { |
||||
dAtA[i] = 0xa |
||||
i++ |
||||
i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) |
||||
i += copy(dAtA[i:], m.Name) |
||||
} |
||||
if len(m.Description) > 0 { |
||||
dAtA[i] = 0x12 |
||||
i++ |
||||
i = encodeVarintPlugin(dAtA, i, uint64(len(m.Description))) |
||||
i += copy(dAtA[i:], m.Description) |
||||
} |
||||
if len(m.Value) > 0 { |
||||
for _, s := range m.Value { |
||||
dAtA[i] = 0x1a |
||||
i++ |
||||
l = len(s) |
||||
for l >= 1<<7 { |
||||
dAtA[i] = uint8(uint64(l)&0x7f | 0x80) |
||||
l >>= 7 |
||||
i++ |
||||
} |
||||
dAtA[i] = uint8(l) |
||||
i++ |
||||
i += copy(dAtA[i:], s) |
||||
} |
||||
} |
||||
return i, nil |
||||
} |
||||
|
||||
func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int { |
||||
for v >= 1<<7 { |
||||
dAtA[offset] = uint8(v&0x7f | 0x80) |
||||
v >>= 7 |
||||
offset++ |
||||
} |
||||
dAtA[offset] = uint8(v) |
||||
return offset + 1 |
||||
} |
||||
func (m *PluginSpec) Size() (n int) { |
||||
var l int |
||||
_ = l |
||||
l = len(m.Name) |
||||
if l > 0 { |
||||
n += 1 + l + sovPlugin(uint64(l)) |
||||
} |
||||
l = len(m.Remote) |
||||
if l > 0 { |
||||
n += 1 + l + sovPlugin(uint64(l)) |
||||
} |
||||
if len(m.Privileges) > 0 { |
||||
for _, e := range m.Privileges { |
||||
l = e.Size() |
||||
n += 1 + l + sovPlugin(uint64(l)) |
||||
} |
||||
} |
||||
if m.Disabled { |
||||
n += 2 |
||||
} |
||||
if len(m.Env) > 0 { |
||||
for _, s := range m.Env { |
||||
l = len(s) |
||||
n += 1 + l + sovPlugin(uint64(l)) |
||||
} |
||||
} |
||||
return n |
||||
} |
||||
|
||||
func (m *PluginPrivilege) Size() (n int) { |
||||
var l int |
||||
_ = l |
||||
l = len(m.Name) |
||||
if l > 0 { |
||||
n += 1 + l + sovPlugin(uint64(l)) |
||||
} |
||||
l = len(m.Description) |
||||
if l > 0 { |
||||
n += 1 + l + sovPlugin(uint64(l)) |
||||
} |
||||
if len(m.Value) > 0 { |
||||
for _, s := range m.Value { |
||||
l = len(s) |
||||
n += 1 + l + sovPlugin(uint64(l)) |
||||
} |
||||
} |
||||
return n |
||||
} |
||||
|
||||
func sovPlugin(x uint64) (n int) { |
||||
for { |
||||
n++ |
||||
x >>= 7 |
||||
if x == 0 { |
||||
break |
||||
} |
||||
} |
||||
return n |
||||
} |
||||
func sozPlugin(x uint64) (n int) { |
||||
return sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63)))) |
||||
} |
||||
func (m *PluginSpec) Unmarshal(dAtA []byte) error { |
||||
l := len(dAtA) |
||||
iNdEx := 0 |
||||
for iNdEx < l { |
||||
preIndex := iNdEx |
||||
var wire uint64 |
||||
for shift := uint(0); ; shift += 7 { |
||||
if shift >= 64 { |
||||
return ErrIntOverflowPlugin |
||||
} |
||||
if iNdEx >= l { |
||||
return io.ErrUnexpectedEOF |
||||
} |
||||
b := dAtA[iNdEx] |
||||
iNdEx++ |
||||
wire |= (uint64(b) & 0x7F) << shift |
||||
if b < 0x80 { |
||||
break |
||||
} |
||||
} |
||||
fieldNum := int32(wire >> 3) |
||||
wireType := int(wire & 0x7) |
||||
if wireType == 4 { |
||||
return fmt.Errorf("proto: PluginSpec: wiretype end group for non-group") |
||||
} |
||||
if fieldNum <= 0 { |
||||
return fmt.Errorf("proto: PluginSpec: illegal tag %d (wire type %d)", fieldNum, wire) |
||||
} |
||||
switch fieldNum { |
||||
case 1: |
||||
if wireType != 2 { |
||||
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) |
||||
} |
||||
var stringLen uint64 |
||||
for shift := uint(0); ; shift += 7 { |
||||
if shift >= 64 { |
||||
return ErrIntOverflowPlugin |
||||
} |
||||
if iNdEx >= l { |
||||
return io.ErrUnexpectedEOF |
||||
} |
||||
b := dAtA[iNdEx] |
||||
iNdEx++ |
||||
stringLen |= (uint64(b) & 0x7F) << shift |
||||
if b < 0x80 { |
||||
break |
||||
} |
||||
} |
||||
intStringLen := int(stringLen) |
||||
if intStringLen < 0 { |
||||
return ErrInvalidLengthPlugin |
||||
} |
||||
postIndex := iNdEx + intStringLen |
||||
if postIndex > l { |
||||
return io.ErrUnexpectedEOF |
||||
} |
||||
m.Name = string(dAtA[iNdEx:postIndex]) |
||||
iNdEx = postIndex |
||||
case 2: |
||||
if wireType != 2 { |
||||
return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType) |
||||
} |
||||
var stringLen uint64 |
||||
for shift := uint(0); ; shift += 7 { |
||||
if shift >= 64 { |
||||
return ErrIntOverflowPlugin |
||||
} |
||||
if iNdEx >= l { |
||||
return io.ErrUnexpectedEOF |
||||
} |
||||
b := dAtA[iNdEx] |
||||
iNdEx++ |
||||
stringLen |= (uint64(b) & 0x7F) << shift |
||||
if b < 0x80 { |
||||
break |
||||
} |
||||
} |
||||
intStringLen := int(stringLen) |
||||
if intStringLen < 0 { |
||||
return ErrInvalidLengthPlugin |
||||
} |
||||
postIndex := iNdEx + intStringLen |
||||
if postIndex > l { |
||||
return io.ErrUnexpectedEOF |
||||
} |
||||
m.Remote = string(dAtA[iNdEx:postIndex]) |
||||
iNdEx = postIndex |
||||
case 3: |
||||
if wireType != 2 { |
||||
return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType) |
||||
} |
||||
var msglen int |
||||
for shift := uint(0); ; shift += 7 { |
||||
if shift >= 64 { |
||||
return ErrIntOverflowPlugin |
||||
} |
||||
if iNdEx >= l { |
||||
return io.ErrUnexpectedEOF |
||||
} |
||||
b := dAtA[iNdEx] |
||||
iNdEx++ |
||||
msglen |= (int(b) & 0x7F) << shift |
||||
if b < 0x80 { |
||||
break |
||||
} |
||||
} |
||||
if msglen < 0 { |
||||
return ErrInvalidLengthPlugin |
||||
} |
||||
postIndex := iNdEx + msglen |
||||
if postIndex > l { |
||||
return io.ErrUnexpectedEOF |
||||
} |
||||
m.Privileges = append(m.Privileges, &PluginPrivilege{}) |
||||
if err := m.Privileges[len(m.Privileges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { |
||||
return err |
||||
} |
||||
iNdEx = postIndex |
||||
case 4: |
||||
if wireType != 0 { |
||||
return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType) |
||||
} |
||||
var v int |
||||
for shift := uint(0); ; shift += 7 { |
||||
if shift >= 64 { |
||||
return ErrIntOverflowPlugin |
||||
} |
||||
if iNdEx >= l { |
||||
return io.ErrUnexpectedEOF |
||||
} |
||||
b := dAtA[iNdEx] |
||||
iNdEx++ |
||||
v |= (int(b) & 0x7F) << shift |
||||
if b < 0x80 { |
||||
break |
||||
} |
||||
} |
||||
m.Disabled = bool(v != 0) |
||||
case 5: |
||||
if wireType != 2 { |
||||
return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) |
||||
} |
||||
var stringLen uint64 |
||||
for shift := uint(0); ; shift += 7 { |
||||
if shift >= 64 { |
||||
return ErrIntOverflowPlugin |
||||
} |
||||
if iNdEx >= l { |
||||
return io.ErrUnexpectedEOF |
||||
} |
||||
b := dAtA[iNdEx] |
||||
iNdEx++ |
||||
stringLen |= (uint64(b) & 0x7F) << shift |
||||
if b < 0x80 { |
||||
break |
||||
} |
||||
} |
||||
intStringLen := int(stringLen) |
||||
if intStringLen < 0 { |
||||
return ErrInvalidLengthPlugin |
||||
} |
||||
postIndex := iNdEx + intStringLen |
||||
if postIndex > l { |
||||
return io.ErrUnexpectedEOF |
||||
} |
||||
m.Env = append(m.Env, string(dAtA[iNdEx:postIndex])) |
||||
iNdEx = postIndex |
||||
default: |
||||
iNdEx = preIndex |
||||
skippy, err := skipPlugin(dAtA[iNdEx:]) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
if skippy < 0 { |
||||
return ErrInvalidLengthPlugin |
||||
} |
||||
if (iNdEx + skippy) > l { |
||||
return io.ErrUnexpectedEOF |
||||
} |
||||
iNdEx += skippy |
||||
} |
||||
} |
||||
|
||||
if iNdEx > l { |
||||
return io.ErrUnexpectedEOF |
||||
} |
||||
return nil |
||||
} |
||||
func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { |
||||
l := len(dAtA) |
||||
iNdEx := 0 |
||||
for iNdEx < l { |
||||
preIndex := iNdEx |
||||
var wire uint64 |
||||
for shift := uint(0); ; shift += 7 { |
||||
if shift >= 64 { |
||||
return ErrIntOverflowPlugin |
||||
} |
||||
if iNdEx >= l { |
||||
return io.ErrUnexpectedEOF |
||||
} |
||||
b := dAtA[iNdEx] |
||||
iNdEx++ |
||||
wire |= (uint64(b) & 0x7F) << shift |
||||
if b < 0x80 { |
||||
break |
||||
} |
||||
} |
||||
fieldNum := int32(wire >> 3) |
||||
wireType := int(wire & 0x7) |
||||
if wireType == 4 { |
||||
return fmt.Errorf("proto: PluginPrivilege: wiretype end group for non-group") |
||||
} |
||||
if fieldNum <= 0 { |
||||
return fmt.Errorf("proto: PluginPrivilege: illegal tag %d (wire type %d)", fieldNum, wire) |
||||
} |
||||
switch fieldNum { |
||||
case 1: |
||||
if wireType != 2 { |
||||
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) |
||||
} |
||||
var stringLen uint64 |
||||
for shift := uint(0); ; shift += 7 { |
||||
if shift >= 64 { |
||||
return ErrIntOverflowPlugin |
||||
} |
||||
if iNdEx >= l { |
||||
return io.ErrUnexpectedEOF |
||||
} |
||||
b := dAtA[iNdEx] |
||||
iNdEx++ |
||||
stringLen |= (uint64(b) & 0x7F) << shift |
||||
if b < 0x80 { |
||||
break |
||||
} |
||||
} |
||||
intStringLen := int(stringLen) |
||||
if intStringLen < 0 { |
||||
return ErrInvalidLengthPlugin |
||||
} |
||||
postIndex := iNdEx + intStringLen |
||||
if postIndex > l { |
||||
return io.ErrUnexpectedEOF |
||||
} |
||||
m.Name = string(dAtA[iNdEx:postIndex]) |
||||
iNdEx = postIndex |
||||
case 2: |
||||
if wireType != 2 { |
||||
return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) |
||||
} |
||||
var stringLen uint64 |
||||
for shift := uint(0); ; shift += 7 { |
||||
if shift >= 64 { |
||||
return ErrIntOverflowPlugin |
||||
} |
||||
if iNdEx >= l { |
||||
return io.ErrUnexpectedEOF |
||||
} |
||||
b := dAtA[iNdEx] |
||||
iNdEx++ |
||||
stringLen |= (uint64(b) & 0x7F) << shift |
||||
if b < 0x80 { |
||||
break |
||||
} |
||||
} |
||||
intStringLen := int(stringLen) |
||||
if intStringLen < 0 { |
||||
return ErrInvalidLengthPlugin |
||||
} |
||||
postIndex := iNdEx + intStringLen |
||||
if postIndex > l { |
||||
return io.ErrUnexpectedEOF |
||||
} |
||||
m.Description = string(dAtA[iNdEx:postIndex]) |
||||
iNdEx = postIndex |
||||
case 3: |
||||
if wireType != 2 { |
||||
return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) |
||||
} |
||||
var stringLen uint64 |
||||
for shift := uint(0); ; shift += 7 { |
||||
if shift >= 64 { |
||||
return ErrIntOverflowPlugin |
||||
} |
||||
if iNdEx >= l { |
||||
return io.ErrUnexpectedEOF |
||||
} |
||||
b := dAtA[iNdEx] |
||||
iNdEx++ |
||||
stringLen |= (uint64(b) & 0x7F) << shift |
||||
if b < 0x80 { |
||||
break |
||||
} |
||||
} |
||||
intStringLen := int(stringLen) |
||||
if intStringLen < 0 { |
||||
return ErrInvalidLengthPlugin |
||||
} |
||||
postIndex := iNdEx + intStringLen |
||||
if postIndex > l { |
||||
return io.ErrUnexpectedEOF |
||||
} |
||||
m.Value = append(m.Value, string(dAtA[iNdEx:postIndex])) |
||||
iNdEx = postIndex |
||||
default: |
||||
iNdEx = preIndex |
||||
skippy, err := skipPlugin(dAtA[iNdEx:]) |
||||
if err != nil { |
||||
return err |
||||
} |
||||
if skippy < 0 { |
||||
return ErrInvalidLengthPlugin |
||||
} |
||||
if (iNdEx + skippy) > l { |
||||
return io.ErrUnexpectedEOF |
||||
} |
||||
iNdEx += skippy |
||||
} |
||||
} |
||||
|
||||
if iNdEx > l { |
||||
return io.ErrUnexpectedEOF |
||||
} |
||||
return nil |
||||
} |
||||
func skipPlugin(dAtA []byte) (n int, err error) { |
||||
l := len(dAtA) |
||||
iNdEx := 0 |
||||
for iNdEx < l { |
||||
var wire uint64 |
||||
for shift := uint(0); ; shift += 7 { |
||||
if shift >= 64 { |
||||
return 0, ErrIntOverflowPlugin |
||||
} |
||||
if iNdEx >= l { |
||||
return 0, io.ErrUnexpectedEOF |
||||
} |
||||
b := dAtA[iNdEx] |
||||
iNdEx++ |
||||
wire |= (uint64(b) & 0x7F) << shift |
||||
if b < 0x80 { |
||||
break |
||||
} |
||||
} |
||||
wireType := int(wire & 0x7) |
||||
switch wireType { |
||||
case 0: |
||||
for shift := uint(0); ; shift += 7 { |
||||
if shift >= 64 { |
||||
return 0, ErrIntOverflowPlugin |
||||
} |
||||
if iNdEx >= l { |
||||
return 0, io.ErrUnexpectedEOF |
||||
} |
||||
iNdEx++ |
||||
if dAtA[iNdEx-1] < 0x80 { |
||||
break |
||||
} |
||||
} |
||||
return iNdEx, nil |
||||
case 1: |
||||
iNdEx += 8 |
||||
return iNdEx, nil |
||||
case 2: |
||||
var length int |
||||
for shift := uint(0); ; shift += 7 { |
||||
if shift >= 64 { |
||||
return 0, ErrIntOverflowPlugin |
||||
} |
||||
if iNdEx >= l { |
||||
return 0, io.ErrUnexpectedEOF |
||||
} |
||||
b := dAtA[iNdEx] |
||||
iNdEx++ |
||||
length |= (int(b) & 0x7F) << shift |
||||
if b < 0x80 { |
||||
break |
||||
} |
||||
} |
||||
iNdEx += length |
||||
if length < 0 { |
||||
return 0, ErrInvalidLengthPlugin |
||||
} |
||||
return iNdEx, nil |
||||
case 3: |
||||
for { |
||||
var innerWire uint64 |
||||
var start int = iNdEx |
||||
for shift := uint(0); ; shift += 7 { |
||||
if shift >= 64 { |
||||
return 0, ErrIntOverflowPlugin |
||||
} |
||||
if iNdEx >= l { |
||||
return 0, io.ErrUnexpectedEOF |
||||
} |
||||
b := dAtA[iNdEx] |
||||
iNdEx++ |
||||
innerWire |= (uint64(b) & 0x7F) << shift |
||||
if b < 0x80 { |
||||
break |
||||
} |
||||
} |
||||
innerWireType := int(innerWire & 0x7) |
||||
if innerWireType == 4 { |
||||
break |
||||
} |
||||
next, err := skipPlugin(dAtA[start:]) |
||||
if err != nil { |
||||
return 0, err |
||||
} |
||||
iNdEx = start + next |
||||
} |
||||
return iNdEx, nil |
||||
case 4: |
||||
return iNdEx, nil |
||||
case 5: |
||||
iNdEx += 4 |
||||
return iNdEx, nil |
||||
default: |
||||
return 0, fmt.Errorf("proto: illegal wireType %d", wireType) |
||||
} |
||||
} |
||||
panic("unreachable") |
||||
} |
||||
|
||||
var ( |
||||
ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling") |
||||
ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow") |
||||
) |
||||
|
||||
func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) } |
||||
|
||||
var fileDescriptorPlugin = []byte{ |
||||
// 256 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x4d, 0x4b, 0xc3, 0x30, |
||||
0x18, 0xc7, 0x89, 0xdd, 0xc6, 0xfa, 0x4c, 0x70, 0x04, 0x91, 0xe2, 0xa1, 0x94, 0x9d, 0x7a, 0x6a, |
||||
0x45, 0x2f, 0x82, 0x37, 0x0f, 0x9e, 0x47, 0xbc, 0x09, 0x1e, 0xd2, 0xf6, 0xa1, 0x06, 0x9b, 0x17, |
||||
0x92, 0xb4, 0xe2, 0x37, 0xf1, 0x23, 0x79, 0xf4, 0x23, 0x48, 0x3f, 0x89, 0x98, 0x75, 0x32, 0x64, |
||||
0xa7, 0xff, 0x4b, 0xc2, 0x9f, 0x1f, 0x0f, 0x9c, 0x9a, 0xae, 0x6f, 0x85, 0x2a, 0x8c, 0xd5, 0x5e, |
||||
0x6f, 0x3e, 0x08, 0xc0, 0x36, 0x14, 0x8f, 0x06, 0x6b, 0x4a, 0x61, 0xa6, 0xb8, 0xc4, 0x84, 0x64, |
||||
0x24, 0x8f, 0x59, 0xf0, 0xf4, 0x02, 0x16, 0x16, 0xa5, 0xf6, 0x98, 0x9c, 0x84, 0x76, 0x4a, 0xf4, |
||||
0x0a, 0xc0, 0x58, 0x31, 0x88, 0x0e, 0x5b, 0x74, 0x49, 0x94, 0x45, 0xf9, 0xea, 0x7a, 0x5d, 0xec, |
||||
0xc6, 0xb6, 0xfb, 0x07, 0x76, 0xf0, 0x87, 0x5e, 0xc2, 0xb2, 0x11, 0x8e, 0x57, 0x1d, 0x36, 0xc9, |
||||
0x2c, 0x23, 0xf9, 0x92, 0xfd, 0x65, 0xba, 0x86, 0x08, 0xd5, 0x90, 0xcc, 0xb3, 0x28, 0x8f, 0xd9, |
||||
0xaf, 0xdd, 0x3c, 0xc3, 0xd9, 0xbf, 0xb1, 0xa3, 0x78, 0x19, 0xac, 0x1a, 0x74, 0xb5, 0x15, 0xc6, |
||||
0x0b, 0xad, 0x26, 0xc6, 0xc3, 0x8a, 0x9e, 0xc3, 0x7c, 0xe0, 0x5d, 0x8f, 0x81, 0x31, 0x66, 0xbb, |
||||
0x70, 0xff, 0xf0, 0x39, 0xa6, 0xe4, 0x6b, 0x4c, 0xc9, 0xf7, 0x98, 0x92, 0xa7, 0xdb, 0x56, 0xf8, |
||||
0x97, 0xbe, 0x2a, 0x6a, 0x2d, 0xcb, 0x46, 0xd7, 0xaf, 0x68, 0xf7, 0xc2, 0x8d, 0x28, 0xfd, 0xbb, |
||||
0x41, 0x57, 0xba, 0x37, 0x6e, 0x65, 0x69, 0x7b, 0xe5, 0x85, 0xc4, 0xbb, 0x49, 0xab, 0x45, 0x38, |
||||
0xe4, 0xcd, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x99, 0xa8, 0xd9, 0x9b, 0x58, 0x01, 0x00, 0x00, |
||||
} |
@ -0,0 +1,21 @@ |
||||
syntax = "proto3"; |
||||
|
||||
option go_package = "github.com/docker/docker/api/types/swarm/runtime;runtime"; |
||||
|
||||
// PluginSpec defines the base payload which clients can specify for creating |
||||
// a service with the plugin runtime. |
||||
message PluginSpec { |
||||
string name = 1; |
||||
string remote = 2; |
||||
repeated PluginPrivilege privileges = 3; |
||||
bool disabled = 4; |
||||
repeated string env = 5; |
||||
} |
||||
|
||||
// PluginPrivilege describes a permission the user has to accept |
||||
// upon installing a plugin. |
||||
message PluginPrivilege { |
||||
string name = 1; |
||||
string description = 2; |
||||
repeated string value = 3; |
||||
} |
@ -0,0 +1,36 @@ |
||||
package swarm // import "github.com/docker/docker/api/types/swarm"
|
||||
|
||||
import "os" |
||||
|
||||
// Secret represents a secret.
|
||||
type Secret struct { |
||||
ID string |
||||
Meta |
||||
Spec SecretSpec |
||||
} |
||||
|
||||
// SecretSpec represents a secret specification from a secret in swarm
|
||||
type SecretSpec struct { |
||||
Annotations |
||||
Data []byte `json:",omitempty"` |
||||
Driver *Driver `json:",omitempty"` // name of the secrets driver used to fetch the secret's value from an external secret store
|
||||
|
||||
// Templating controls whether and how to evaluate the secret payload as
|
||||
// a template. If it is not set, no templating is used.
|
||||
Templating *Driver `json:",omitempty"` |
||||
} |
||||
|
||||
// SecretReferenceFileTarget is a file target in a secret reference
|
||||
type SecretReferenceFileTarget struct { |
||||
Name string |
||||
UID string |
||||
GID string |
||||
Mode os.FileMode |
||||
} |
||||
|
||||
// SecretReference is a reference to a secret in swarm
|
||||
type SecretReference struct { |
||||
File *SecretReferenceFileTarget |
||||
SecretID string |
||||
SecretName string |
||||
} |
@ -0,0 +1,124 @@ |
||||
package swarm // import "github.com/docker/docker/api/types/swarm"
|
||||
|
||||
import "time" |
||||
|
||||
// Service represents a service.
type Service struct {
	ID string
	Meta
	Spec         ServiceSpec   `json:",omitempty"`
	PreviousSpec *ServiceSpec  `json:",omitempty"`
	Endpoint     Endpoint      `json:",omitempty"`
	UpdateStatus *UpdateStatus `json:",omitempty"`
}

// ServiceSpec represents the spec of a service.
type ServiceSpec struct {
	Annotations

	// TaskTemplate defines how the service should construct new tasks when
	// orchestrating this service.
	TaskTemplate   TaskSpec      `json:",omitempty"`
	Mode           ServiceMode   `json:",omitempty"`
	UpdateConfig   *UpdateConfig `json:",omitempty"`
	RollbackConfig *UpdateConfig `json:",omitempty"`

	// Networks field in ServiceSpec is deprecated. The
	// same field in TaskSpec should be used instead.
	// This field will be removed in a future release.
	Networks     []NetworkAttachmentConfig `json:",omitempty"`
	EndpointSpec *EndpointSpec             `json:",omitempty"`
}

// ServiceMode represents the mode of a service.
// Exactly one of the two fields is expected to be set.
type ServiceMode struct {
	Replicated *ReplicatedService `json:",omitempty"`
	Global     *GlobalService     `json:",omitempty"`
}

// UpdateState is the state of a service update.
type UpdateState string

const (
	// UpdateStateUpdating is the updating state.
	UpdateStateUpdating UpdateState = "updating"
	// UpdateStatePaused is the paused state.
	UpdateStatePaused UpdateState = "paused"
	// UpdateStateCompleted is the completed state.
	UpdateStateCompleted UpdateState = "completed"
	// UpdateStateRollbackStarted is the state with a rollback in progress.
	UpdateStateRollbackStarted UpdateState = "rollback_started"
	// UpdateStateRollbackPaused is the state with a rollback paused.
	UpdateStateRollbackPaused UpdateState = "rollback_paused"
	// UpdateStateRollbackCompleted is the state with a rollback completed.
	UpdateStateRollbackCompleted UpdateState = "rollback_completed"
)

// UpdateStatus reports the status of a service update.
type UpdateStatus struct {
	State       UpdateState `json:",omitempty"`
	StartedAt   *time.Time  `json:",omitempty"`
	CompletedAt *time.Time  `json:",omitempty"`
	Message     string      `json:",omitempty"`
}

// ReplicatedService is a kind of ServiceMode.
type ReplicatedService struct {
	Replicas *uint64 `json:",omitempty"`
}

// GlobalService is a kind of ServiceMode.
type GlobalService struct{}

const (
	// UpdateFailureActionPause PAUSE
	UpdateFailureActionPause = "pause"
	// UpdateFailureActionContinue CONTINUE
	UpdateFailureActionContinue = "continue"
	// UpdateFailureActionRollback ROLLBACK
	UpdateFailureActionRollback = "rollback"

	// UpdateOrderStopFirst STOP_FIRST
	UpdateOrderStopFirst = "stop-first"
	// UpdateOrderStartFirst START_FIRST
	UpdateOrderStartFirst = "start-first"
)

// UpdateConfig represents the update configuration.
type UpdateConfig struct {
	// Maximum number of tasks to be updated in one iteration.
	// 0 means unlimited parallelism.
	Parallelism uint64

	// Amount of time between updates.
	Delay time.Duration `json:",omitempty"`

	// FailureAction is the action to take when an update fails.
	// See the UpdateFailureAction* constants above.
	FailureAction string `json:",omitempty"`

	// Monitor indicates how long to monitor a task for failure after it is
	// created. If the task fails by ending up in one of the states
	// REJECTED, COMPLETED, or FAILED, within Monitor from its creation,
	// this counts as a failure. If it fails after Monitor, it does not
	// count as a failure. If Monitor is unspecified, a default value will
	// be used.
	Monitor time.Duration `json:",omitempty"`

	// MaxFailureRatio is the fraction of tasks that may fail during
	// an update before the failure action is invoked. Any task created by
	// the current update which ends up in one of the states REJECTED,
	// COMPLETED or FAILED within Monitor from its creation counts as a
	// failure. The number of failures is divided by the number of tasks
	// being updated, and if this fraction is greater than
	// MaxFailureRatio, the failure action is invoked.
	//
	// If the failure action is CONTINUE, there is no effect.
	// If the failure action is PAUSE, no more tasks will be updated until
	// another update is started.
	MaxFailureRatio float32

	// Order indicates the order of operations when rolling out an updated
	// task. Either the old task is shut down before the new task is
	// started, or the new task is started before the old task is shut down.
	// See the UpdateOrder* constants above.
	Order string
}
@ -0,0 +1,227 @@ |
||||
package swarm // import "github.com/docker/docker/api/types/swarm"
|
||||
|
||||
import ( |
||||
"time" |
||||
) |
||||
|
||||
// ClusterInfo represents info about the cluster for outputting in "info"
// it contains the same information as "Swarm", but without the JoinTokens
type ClusterInfo struct {
	ID string
	Meta
	Spec                   Spec
	TLSInfo                TLSInfo
	RootRotationInProgress bool
	DefaultAddrPool        []string
	SubnetSize             uint32
	DataPathPort           uint32
}

// Swarm represents a swarm.
type Swarm struct {
	ClusterInfo
	JoinTokens JoinTokens
}

// JoinTokens contains the tokens workers and managers need to join the swarm.
type JoinTokens struct {
	// Worker is the join token workers may use to join the swarm.
	Worker string
	// Manager is the join token managers may use to join the swarm.
	Manager string
}

// Spec represents the spec of a swarm.
type Spec struct {
	Annotations

	Orchestration    OrchestrationConfig `json:",omitempty"`
	Raft             RaftConfig          `json:",omitempty"`
	Dispatcher       DispatcherConfig    `json:",omitempty"`
	CAConfig         CAConfig            `json:",omitempty"`
	TaskDefaults     TaskDefaults        `json:",omitempty"`
	EncryptionConfig EncryptionConfig    `json:",omitempty"`
}

// OrchestrationConfig represents orchestration configuration.
type OrchestrationConfig struct {
	// TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or
	// node. If negative, never remove completed or failed tasks.
	TaskHistoryRetentionLimit *int64 `json:",omitempty"`
}

// TaskDefaults parameterizes cluster-level task creation with default values.
type TaskDefaults struct {
	// LogDriver selects the log driver to use for tasks created in the
	// orchestrator if unspecified by a service.
	//
	// Updating this value will only have an effect on new tasks. Old tasks
	// will continue to use their previously configured log driver until
	// recreated.
	LogDriver *Driver `json:",omitempty"`
}

// EncryptionConfig controls at-rest encryption of data and keys.
type EncryptionConfig struct {
	// AutoLockManagers specifies whether or not managers TLS keys and raft data
	// should be encrypted at rest in such a way that they must be unlocked
	// before the manager node starts up again.
	AutoLockManagers bool
}

// RaftConfig represents raft configuration.
type RaftConfig struct {
	// SnapshotInterval is the number of log entries between snapshots.
	SnapshotInterval uint64 `json:",omitempty"`

	// KeepOldSnapshots is the number of snapshots to keep beyond the
	// current snapshot.
	KeepOldSnapshots *uint64 `json:",omitempty"`

	// LogEntriesForSlowFollowers is the number of log entries to keep
	// around to sync up slow followers after a snapshot is created.
	LogEntriesForSlowFollowers uint64 `json:",omitempty"`

	// ElectionTick is the number of ticks that a follower will wait for a message
	// from the leader before becoming a candidate and starting an election.
	// ElectionTick must be greater than HeartbeatTick.
	//
	// A tick currently defaults to one second, so these translate directly to
	// seconds currently, but this is NOT guaranteed.
	ElectionTick int

	// HeartbeatTick is the number of ticks between heartbeats. Every
	// HeartbeatTick ticks, the leader will send a heartbeat to the
	// followers.
	//
	// A tick currently defaults to one second, so these translate directly to
	// seconds currently, but this is NOT guaranteed.
	HeartbeatTick int
}

// DispatcherConfig represents dispatcher configuration.
type DispatcherConfig struct {
	// HeartbeatPeriod defines how often agent should send heartbeats to
	// dispatcher.
	HeartbeatPeriod time.Duration `json:",omitempty"`
}

// CAConfig represents CA configuration.
type CAConfig struct {
	// NodeCertExpiry is the duration certificates should be issued for
	NodeCertExpiry time.Duration `json:",omitempty"`

	// ExternalCAs is a list of CAs to which a manager node will make
	// certificate signing requests for node certificates.
	ExternalCAs []*ExternalCA `json:",omitempty"`

	// SigningCACert and SigningCAKey specify the desired signing root CA and
	// root CA key for the swarm. When inspecting the cluster, the key will
	// be redacted.
	SigningCACert string `json:",omitempty"`
	SigningCAKey  string `json:",omitempty"`

	// If this value changes, and there is no specified signing cert and key,
	// then the swarm is forced to generate a new root certificate and key.
	ForceRotate uint64 `json:",omitempty"`
}

// ExternalCAProtocol represents type of external CA.
type ExternalCAProtocol string

// ExternalCAProtocolCFSSL CFSSL
const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl"

// ExternalCA defines external CA to be used by the cluster.
type ExternalCA struct {
	// Protocol is the protocol used by this external CA.
	Protocol ExternalCAProtocol

	// URL is the URL where the external CA can be reached.
	URL string

	// Options is a set of additional key/value pairs whose interpretation
	// depends on the specified CA type.
	Options map[string]string `json:",omitempty"`

	// CACert specifies which root CA is used by this external CA. This certificate must
	// be in PEM format.
	CACert string
}

// InitRequest is the request used to init a swarm.
type InitRequest struct {
	ListenAddr       string
	AdvertiseAddr    string
	DataPathAddr     string
	DataPathPort     uint32
	ForceNewCluster  bool
	Spec             Spec
	AutoLockManagers bool
	Availability     NodeAvailability
	DefaultAddrPool  []string
	SubnetSize       uint32
}

// JoinRequest is the request used to join a swarm.
type JoinRequest struct {
	ListenAddr    string
	AdvertiseAddr string
	DataPathAddr  string
	RemoteAddrs   []string
	JoinToken     string // accept by secret
	Availability  NodeAvailability
}

// UnlockRequest is the request used to unlock a swarm.
type UnlockRequest struct {
	// UnlockKey is the unlock key in ASCII-armored format.
	UnlockKey string
}

// LocalNodeState represents the state of the local node.
type LocalNodeState string

const (
	// LocalNodeStateInactive INACTIVE
	LocalNodeStateInactive LocalNodeState = "inactive"
	// LocalNodeStatePending PENDING
	LocalNodeStatePending LocalNodeState = "pending"
	// LocalNodeStateActive ACTIVE
	LocalNodeStateActive LocalNodeState = "active"
	// LocalNodeStateError ERROR
	LocalNodeStateError LocalNodeState = "error"
	// LocalNodeStateLocked LOCKED
	LocalNodeStateLocked LocalNodeState = "locked"
)

// Info represents generic information about swarm.
type Info struct {
	NodeID   string
	NodeAddr string

	LocalNodeState   LocalNodeState
	ControlAvailable bool
	Error            string

	RemoteManagers []Peer
	Nodes          int `json:",omitempty"`
	Managers       int `json:",omitempty"`

	Cluster *ClusterInfo `json:",omitempty"`

	Warnings []string `json:",omitempty"`
}

// Peer represents a peer.
type Peer struct {
	NodeID string
	Addr   string
}

// UpdateFlags contains flags for SwarmUpdate.
type UpdateFlags struct {
	RotateWorkerToken      bool
	RotateManagerToken     bool
	RotateManagerUnlockKey bool
}
@ -0,0 +1,192 @@ |
||||
package swarm // import "github.com/docker/docker/api/types/swarm"
|
||||
|
||||
import ( |
||||
"time" |
||||
|
||||
"github.com/docker/docker/api/types/swarm/runtime" |
||||
) |
||||
|
||||
// TaskState represents the state of a task.
type TaskState string

const (
	// TaskStateNew NEW
	TaskStateNew TaskState = "new"
	// TaskStateAllocated ALLOCATED
	TaskStateAllocated TaskState = "allocated"
	// TaskStatePending PENDING
	TaskStatePending TaskState = "pending"
	// TaskStateAssigned ASSIGNED
	TaskStateAssigned TaskState = "assigned"
	// TaskStateAccepted ACCEPTED
	TaskStateAccepted TaskState = "accepted"
	// TaskStatePreparing PREPARING
	TaskStatePreparing TaskState = "preparing"
	// TaskStateReady READY
	TaskStateReady TaskState = "ready"
	// TaskStateStarting STARTING
	TaskStateStarting TaskState = "starting"
	// TaskStateRunning RUNNING
	TaskStateRunning TaskState = "running"
	// TaskStateComplete COMPLETE
	TaskStateComplete TaskState = "complete"
	// TaskStateShutdown SHUTDOWN
	TaskStateShutdown TaskState = "shutdown"
	// TaskStateFailed FAILED
	TaskStateFailed TaskState = "failed"
	// TaskStateRejected REJECTED
	TaskStateRejected TaskState = "rejected"
	// TaskStateRemove REMOVE
	TaskStateRemove TaskState = "remove"
	// TaskStateOrphaned ORPHANED
	TaskStateOrphaned TaskState = "orphaned"
)

// Task represents a task.
type Task struct {
	ID string
	Meta
	Annotations

	Spec                TaskSpec            `json:",omitempty"`
	ServiceID           string              `json:",omitempty"`
	Slot                int                 `json:",omitempty"`
	NodeID              string              `json:",omitempty"`
	Status              TaskStatus          `json:",omitempty"`
	DesiredState        TaskState           `json:",omitempty"`
	NetworksAttachments []NetworkAttachment `json:",omitempty"`
	GenericResources    []GenericResource   `json:",omitempty"`
}

// TaskSpec represents the spec of a task.
type TaskSpec struct {
	// ContainerSpec, NetworkAttachmentSpec, and PluginSpec are mutually exclusive.
	// PluginSpec is only used when the `Runtime` field is set to `plugin`
	// NetworkAttachmentSpec is used if the `Runtime` field is set to
	// `attachment`.
	ContainerSpec         *ContainerSpec         `json:",omitempty"`
	PluginSpec            *runtime.PluginSpec    `json:",omitempty"`
	NetworkAttachmentSpec *NetworkAttachmentSpec `json:",omitempty"`

	Resources     *ResourceRequirements     `json:",omitempty"`
	RestartPolicy *RestartPolicy            `json:",omitempty"`
	Placement     *Placement                `json:",omitempty"`
	Networks      []NetworkAttachmentConfig `json:",omitempty"`

	// LogDriver specifies the LogDriver to use for tasks created from this
	// spec. If not present, the one on cluster default on swarm.Spec will be
	// used, finally falling back to the engine default if not specified.
	LogDriver *Driver `json:",omitempty"`

	// ForceUpdate is a counter that triggers an update even if no relevant
	// parameters have been changed.
	ForceUpdate uint64

	Runtime RuntimeType `json:",omitempty"`
}

// Resources represents resources (CPU/Memory).
type Resources struct {
	NanoCPUs         int64             `json:",omitempty"`
	MemoryBytes      int64             `json:",omitempty"`
	GenericResources []GenericResource `json:",omitempty"`
}

// GenericResource represents a "user defined" resource which can
// be either an integer (e.g: SSD=3) or a string (e.g: SSD=sda1)
// Exactly one of the two spec fields is expected to be set.
type GenericResource struct {
	NamedResourceSpec    *NamedGenericResource    `json:",omitempty"`
	DiscreteResourceSpec *DiscreteGenericResource `json:",omitempty"`
}

// NamedGenericResource represents a "user defined" resource which is defined
// as a string.
// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...)
// Value is used to identify the resource (GPU="UUID-1", FPGA="/dev/sdb5", ...)
type NamedGenericResource struct {
	Kind  string `json:",omitempty"`
	Value string `json:",omitempty"`
}

// DiscreteGenericResource represents a "user defined" resource which is defined
// as an integer
// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...)
// Value is used to count the resource (SSD=5, HDD=3, ...)
type DiscreteGenericResource struct {
	Kind  string `json:",omitempty"`
	Value int64  `json:",omitempty"`
}

// ResourceRequirements represents resources requirements.
type ResourceRequirements struct {
	Limits       *Resources `json:",omitempty"`
	Reservations *Resources `json:",omitempty"`
}

// Placement represents orchestration parameters.
type Placement struct {
	Constraints []string              `json:",omitempty"`
	Preferences []PlacementPreference `json:",omitempty"`
	MaxReplicas uint64                `json:",omitempty"`

	// Platforms stores all the platforms that the image can run on.
	// This field is used in the platform filter for scheduling. If empty,
	// then the platform filter is off, meaning there are no scheduling restrictions.
	Platforms []Platform `json:",omitempty"`
}

// PlacementPreference provides a way to make the scheduler aware of factors
// such as topology.
type PlacementPreference struct {
	Spread *SpreadOver
}

// SpreadOver is a scheduling preference that instructs the scheduler to spread
// tasks evenly over groups of nodes identified by labels.
type SpreadOver struct {
	// label descriptor, such as engine.labels.az
	SpreadDescriptor string
}

// RestartPolicy represents the restart policy.
type RestartPolicy struct {
	Condition   RestartPolicyCondition `json:",omitempty"`
	Delay       *time.Duration         `json:",omitempty"`
	MaxAttempts *uint64                `json:",omitempty"`
	Window      *time.Duration         `json:",omitempty"`
}

// RestartPolicyCondition represents when to restart.
type RestartPolicyCondition string

const (
	// RestartPolicyConditionNone NONE
	RestartPolicyConditionNone RestartPolicyCondition = "none"
	// RestartPolicyConditionOnFailure ON_FAILURE
	RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure"
	// RestartPolicyConditionAny ANY
	RestartPolicyConditionAny RestartPolicyCondition = "any"
)

// TaskStatus represents the status of a task.
type TaskStatus struct {
	Timestamp       time.Time        `json:",omitempty"`
	State           TaskState        `json:",omitempty"`
	Message         string           `json:",omitempty"`
	Err             string           `json:",omitempty"`
	ContainerStatus *ContainerStatus `json:",omitempty"`
	PortStatus      PortStatus       `json:",omitempty"`
}

// ContainerStatus represents the status of a container.
type ContainerStatus struct {
	ContainerID string
	PID         int
	ExitCode    int
}

// PortStatus represents the port status of a task's host ports whose
// service has published host ports
type PortStatus struct {
	Ports []PortConfig `json:",omitempty"`
}
@ -0,0 +1,12 @@ |
||||
package time // import "github.com/docker/docker/api/types/time"
|
||||
|
||||
import ( |
||||
"strconv" |
||||
"time" |
||||
) |
||||
|
||||
// DurationToSecondsString converts the specified duration to the number
|
||||
// seconds it represents, formatted as a string.
|
||||
func DurationToSecondsString(duration time.Duration) string { |
||||
return strconv.FormatFloat(duration.Seconds(), 'f', 0, 64) |
||||
} |
@ -0,0 +1,129 @@ |
||||
package time // import "github.com/docker/docker/api/types/time"
|
||||
|
||||
import ( |
||||
"fmt" |
||||
"math" |
||||
"strconv" |
||||
"strings" |
||||
"time" |
||||
) |
||||
|
||||
// These are additional predefined layouts for use in Time.Format and Time.Parse
// with --since and --until parameters for `docker logs` and `docker events`.
// They mirror the stdlib RFC3339 layouts but omit the zone designator so the
// value is interpreted in the local (reference) timezone.
const (
	rFC3339Local     = "2006-01-02T15:04:05"           // RFC3339 with local timezone
	rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone
	dateWithZone     = "2006-01-02Z07:00"              // RFC3339 with time at 00:00:00
	dateLocal        = "2006-01-02"                    // RFC3339 with local timezone and time at 00:00:00
)
||||
|
||||
// GetTimestamp tries to parse given string as golang duration,
// then RFC3339 time and finally as a Unix timestamp. If
// any of these were successful, it returns a Unix timestamp
// as string otherwise returns the given value back.
// In case of duration input, the returned timestamp is computed
// as the given reference time minus the amount of the duration.
func GetTimestamp(value string, reference time.Time) (string, error) {
	// Durations first ("10m", "1h30m", ...). "0" is excluded so a bare zero
	// falls through to the Unix-timestamp path instead of "reference - 0".
	if d, err := time.ParseDuration(value); value != "0" && err == nil {
		return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil
	}

	var format string
	// if the string has a Z or a + or three dashes use parse otherwise use parseinlocation
	// (three dashes = two date separators plus a "-hh:mm" zone offset).
	parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)

	// Pick the layout that matches the input's precision.
	if strings.Contains(value, ".") {
		if parseInLocation {
			format = rFC3339NanoLocal
		} else {
			format = time.RFC3339Nano
		}
	} else if strings.Contains(value, "T") {
		// we want the number of colons in the T portion of the timestamp
		tcolons := strings.Count(value, ":")
		// if parseInLocation is off and we have a +/- zone offset (not Z) then
		// there will be an extra colon in the input for the tz offset subtract that
		// colon from the tcolons count
		if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 {
			tcolons--
		}
		if parseInLocation {
			switch tcolons {
			case 0:
				format = "2006-01-02T15"
			case 1:
				format = "2006-01-02T15:04"
			default:
				format = rFC3339Local
			}
		} else {
			switch tcolons {
			case 0:
				format = "2006-01-02T15Z07:00"
			case 1:
				format = "2006-01-02T15:04Z07:00"
			default:
				format = time.RFC3339
			}
		}
	} else if parseInLocation {
		format = dateLocal
	} else {
		format = dateWithZone
	}

	var t time.Time
	var err error

	if parseInLocation {
		// Interpret the zone-less value in the reference time's zone.
		t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone()))
	} else {
		t, err = time.Parse(format, value)
	}

	if err != nil {
		// if there is a `-` then it's an RFC3339 like timestamp
		if strings.Contains(value, "-") {
			return "", err // was probably an RFC3339 like timestamp but the parser failed with an error
		}
		if _, _, err := parseTimestamp(value); err != nil {
			return "", fmt.Errorf("failed to parse value as time or duration: %q", value)
		}
		return value, nil // unix timestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server)
	}

	return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil
}
||||
|
||||
// ParseTimestamps returns seconds and nanoseconds from a timestamp that has the
|
||||
// format "%d.%09d", time.Unix(), int64(time.Nanosecond()))
|
||||
// if the incoming nanosecond portion is longer or shorter than 9 digits it is
|
||||
// converted to nanoseconds. The expectation is that the seconds and
|
||||
// seconds will be used to create a time variable. For example:
|
||||
// seconds, nanoseconds, err := ParseTimestamp("1136073600.000000001",0)
|
||||
// if err == nil since := time.Unix(seconds, nanoseconds)
|
||||
// returns seconds as def(aultSeconds) if value == ""
|
||||
func ParseTimestamps(value string, def int64) (int64, int64, error) { |
||||
if value == "" { |
||||
return def, 0, nil |
||||
} |
||||
return parseTimestamp(value) |
||||
} |
||||
|
||||
// parseTimestamp splits a "<seconds>[.<fraction>]" string and returns the
// integer seconds and nanoseconds components.
func parseTimestamp(value string) (int64, int64, error) {
	secPart, fracPart, hasFraction := strings.Cut(value, ".")
	seconds, err := strconv.ParseInt(secPart, 10, 64)
	if err != nil {
		return seconds, 0, err
	}
	if !hasFraction {
		return seconds, 0, nil
	}
	nanos, err := strconv.ParseInt(fracPart, 10, 64)
	if err != nil {
		return seconds, nanos, err
	}
	// Scale the fractional digits to nanoseconds, whatever their count
	// (e.g. ".5" -> 500000000, ".000000001" -> 1).
	nanos = int64(float64(nanos) * math.Pow(float64(10), float64(9-len(fracPart))))
	return seconds, nanos, nil
}
@ -0,0 +1,616 @@ |
||||
package types // import "github.com/docker/docker/api/types"
|
||||
|
||||
import ( |
||||
"errors" |
||||
"fmt" |
||||
"io" |
||||
"os" |
||||
"strings" |
||||
"time" |
||||
|
||||
"github.com/docker/docker/api/types/container" |
||||
"github.com/docker/docker/api/types/filters" |
||||
"github.com/docker/docker/api/types/mount" |
||||
"github.com/docker/docker/api/types/network" |
||||
"github.com/docker/docker/api/types/registry" |
||||
"github.com/docker/docker/api/types/swarm" |
||||
"github.com/docker/go-connections/nat" |
||||
) |
||||
|
||||
// RootFS returns Image's RootFS description including the layer IDs.
type RootFS struct {
	Type      string
	Layers    []string `json:",omitempty"`
	BaseLayer string   `json:",omitempty"`
}

// ImageInspect contains response of Engine API:
// GET "/images/{name:.*}/json"
type ImageInspect struct {
	ID              string `json:"Id"`
	RepoTags        []string
	RepoDigests     []string
	Parent          string
	Comment         string
	Created         string
	Container       string
	ContainerConfig *container.Config
	DockerVersion   string
	Author          string
	Config          *container.Config
	Architecture    string
	Os              string
	OsVersion       string `json:",omitempty"`
	Size            int64
	VirtualSize     int64
	GraphDriver     GraphDriverData
	RootFS          RootFS
	Metadata        ImageMetadata
}

// ImageMetadata contains engine-local data about the image
type ImageMetadata struct {
	LastTagTime time.Time `json:",omitempty"`
}

// Container contains response of Engine API:
// GET "/containers/json"
type Container struct {
	ID         string `json:"Id"`
	Names      []string
	Image      string
	ImageID    string
	Command    string
	Created    int64
	Ports      []Port
	SizeRw     int64 `json:",omitempty"`
	SizeRootFs int64 `json:",omitempty"`
	Labels     map[string]string
	State      string
	Status     string
	HostConfig struct {
		NetworkMode string `json:",omitempty"`
	}
	NetworkSettings *SummaryNetworkSettings
	Mounts          []MountPoint
}

// CopyConfig contains request body of Engine API:
// POST "/containers/"+containerID+"/copy"
type CopyConfig struct {
	Resource string
}

// ContainerPathStat is used to encode the header from
// GET "/containers/{name:.*}/archive"
// "Name" is the file or directory name.
type ContainerPathStat struct {
	Name       string      `json:"name"`
	Size       int64       `json:"size"`
	Mode       os.FileMode `json:"mode"`
	Mtime      time.Time   `json:"mtime"`
	LinkTarget string      `json:"linkTarget"`
}

// ContainerStats contains response of Engine API:
// GET "/stats"
type ContainerStats struct {
	Body   io.ReadCloser `json:"body"`
	OSType string        `json:"ostype"`
}

// Ping contains response of Engine API:
// GET "/_ping"
type Ping struct {
	APIVersion     string
	OSType         string
	Experimental   bool
	BuilderVersion BuilderVersion
}

// ComponentVersion describes the version information for a specific component.
type ComponentVersion struct {
	Name    string
	Version string
	Details map[string]string `json:",omitempty"`
}

// Version contains response of Engine API:
// GET "/version"
type Version struct {
	Platform   struct{ Name string } `json:",omitempty"`
	Components []ComponentVersion    `json:",omitempty"`

	// The following fields are deprecated, they relate to the Engine component and are kept for backwards compatibility

	Version       string
	APIVersion    string `json:"ApiVersion"`
	MinAPIVersion string `json:"MinAPIVersion,omitempty"`
	GitCommit     string
	GoVersion     string
	Os            string
	Arch          string
	KernelVersion string `json:",omitempty"`
	Experimental  bool   `json:",omitempty"`
	BuildTime     string `json:",omitempty"`
}

// Commit holds the Git-commit (SHA1) that a binary was built from, as reported
// in the version-string of external tools, such as containerd, or runC.
type Commit struct {
	ID       string // ID is the actual commit ID of external tool.
	Expected string // Expected is the commit ID of external tool expected by dockerd as set at build time.
}

// Info contains response of Engine API:
// GET "/info"
type Info struct {
	ID                 string
	Containers         int
	ContainersRunning  int
	ContainersPaused   int
	ContainersStopped  int
	Images             int
	Driver             string
	DriverStatus       [][2]string
	SystemStatus       [][2]string
	Plugins            PluginsInfo
	MemoryLimit        bool
	SwapLimit          bool
	KernelMemory       bool
	KernelMemoryTCP    bool
	CPUCfsPeriod       bool `json:"CpuCfsPeriod"`
	CPUCfsQuota        bool `json:"CpuCfsQuota"`
	CPUShares          bool
	CPUSet             bool
	PidsLimit          bool
	IPv4Forwarding     bool
	BridgeNfIptables   bool
	BridgeNfIP6tables  bool `json:"BridgeNfIp6tables"`
	Debug              bool
	NFd                int
	OomKillDisable     bool
	NGoroutines        int
	SystemTime         string
	LoggingDriver      string
	CgroupDriver       string
	NEventsListener    int
	KernelVersion      string
	OperatingSystem    string
	OSVersion          string
	OSType             string
	Architecture       string
	IndexServerAddress string
	RegistryConfig     *registry.ServiceConfig
	NCPU               int
	MemTotal           int64
	GenericResources   []swarm.GenericResource
	DockerRootDir      string
	HTTPProxy          string `json:"HttpProxy"`
	HTTPSProxy         string `json:"HttpsProxy"`
	NoProxy            string
	Name               string
	Labels             []string
	ExperimentalBuild  bool
	ServerVersion      string
	ClusterStore       string
	ClusterAdvertise   string
	Runtimes           map[string]Runtime
	DefaultRuntime     string
	Swarm              swarm.Info
	// LiveRestoreEnabled determines whether containers should be kept
	// running when the daemon is shutdown or upon daemon start if
	// running containers are detected
	LiveRestoreEnabled bool
	Isolation          container.Isolation
	InitBinary         string
	ContainerdCommit   Commit
	RuncCommit         Commit
	InitCommit         Commit
	SecurityOptions    []string
	ProductLicense     string `json:",omitempty"`
	Warnings           []string
}
||||
|
||||
// KeyValue holds a key/value pair
type KeyValue struct {
	Key, Value string
}
||||
|
||||
// SecurityOpt contains the name and options of a security option
type SecurityOpt struct {
	Name    string     // Name identifies the security option (e.g. as set via "name=...").
	Options []KeyValue // Options holds any additional key/value settings of the option.
}
||||
|
||||
// DecodeSecurityOptions decodes a security options string slice to a type safe
|
||||
// SecurityOpt
|
||||
func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) { |
||||
so := []SecurityOpt{} |
||||
for _, opt := range opts { |
||||
// support output from a < 1.13 docker daemon
|
||||
if !strings.Contains(opt, "=") { |
||||
so = append(so, SecurityOpt{Name: opt}) |
||||
continue |
||||
} |
||||
secopt := SecurityOpt{} |
||||
split := strings.Split(opt, ",") |
||||
for _, s := range split { |
||||
kv := strings.SplitN(s, "=", 2) |
||||
if len(kv) != 2 { |
||||
return nil, fmt.Errorf("invalid security option %q", s) |
||||
} |
||||
if kv[0] == "" || kv[1] == "" { |
||||
return nil, errors.New("invalid empty security option") |
||||
} |
||||
if kv[0] == "name" { |
||||
secopt.Name = kv[1] |
||||
continue |
||||
} |
||||
secopt.Options = append(secopt.Options, KeyValue{Key: kv[0], Value: kv[1]}) |
||||
} |
||||
so = append(so, secopt) |
||||
} |
||||
return so, nil |
||||
} |
||||
|
||||
// PluginsInfo is a temp struct holding Plugins name
// registered with docker daemon. It is used by Info struct
type PluginsInfo struct {
	// List of Volume plugins registered
	Volume []string
	// List of Network plugins registered
	Network []string
	// List of Authorization plugins registered
	Authorization []string
	// List of Log plugins registered
	Log []string
}
||||
|
||||
// ExecStartCheck is a temp struct used by execStart
// Config fields is part of ExecConfig in runconfig package
type ExecStartCheck struct {
	// ExecStart will first check if it's detached
	Detach bool
	// Check if there's a tty
	Tty bool
}
||||
|
||||
// HealthcheckResult stores information about a single run of a healthcheck probe
type HealthcheckResult struct {
	Start    time.Time // Start is the time this check started
	End      time.Time // End is the time this check ended
	ExitCode int       // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe
	Output   string    // Output from last check
}
||||
|
||||
// Health states reported in Health.Status.
const (
	NoHealthcheck = "none"      // Indicates there is no healthcheck
	Starting      = "starting"  // Starting indicates that the container is not yet ready
	Healthy       = "healthy"   // Healthy indicates that the container is running correctly
	Unhealthy     = "unhealthy" // Unhealthy indicates that the container has a problem
)
||||
|
||||
// Health stores information about the container's healthcheck results
type Health struct {
	Status        string               // Status is one of Starting, Healthy or Unhealthy
	FailingStreak int                  // FailingStreak is the number of consecutive failures
	Log           []*HealthcheckResult // Log contains the last few results (oldest first)
}
||||
|
||||
// ContainerState stores container's running state
// it's part of ContainerJSONBase and will return by "inspect" command
type ContainerState struct {
	Status     string // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead"
	Running    bool
	Paused     bool
	Restarting bool
	OOMKilled  bool
	Dead       bool
	Pid        int
	ExitCode   int
	Error      string
	StartedAt  string // timestamps serialized as strings — presumably RFC3339; confirm against daemon output
	FinishedAt string
	Health     *Health `json:",omitempty"`
}
||||
|
||||
// ContainerNode stores information about the node that a container
// is running on. It's only available in Docker Swarm
type ContainerNode struct {
	ID        string
	IPAddress string `json:"IP"`
	Addr      string
	Name      string
	Cpus      int
	Memory    int64
	Labels    map[string]string
}
||||
|
||||
// ContainerJSONBase contains response of Engine API:
// GET "/containers/{name:.*}/json"
type ContainerJSONBase struct {
	ID              string `json:"Id"`
	Created         string
	Path            string   // Path is the entrypoint binary invoked in the container.
	Args            []string // Args are the arguments passed to Path.
	State           *ContainerState
	Image           string
	ResolvConfPath  string
	HostnamePath    string
	HostsPath       string
	LogPath         string
	Node            *ContainerNode `json:",omitempty"` // Node is only set in Docker Swarm (see ContainerNode).
	Name            string
	RestartCount    int
	Driver          string
	Platform        string
	MountLabel      string
	ProcessLabel    string
	AppArmorProfile string
	ExecIDs         []string
	HostConfig      *container.HostConfig
	GraphDriver     GraphDriverData
	SizeRw          *int64 `json:",omitempty"`
	SizeRootFs      *int64 `json:",omitempty"`
}
||||
|
||||
// ContainerJSON is newly used struct along with MountPoint
type ContainerJSON struct {
	*ContainerJSONBase
	Mounts          []MountPoint
	Config          *container.Config
	NetworkSettings *NetworkSettings
}
||||
|
||||
// NetworkSettings exposes the network settings in the api
type NetworkSettings struct {
	NetworkSettingsBase
	DefaultNetworkSettings
	Networks map[string]*network.EndpointSettings // Networks maps network name to per-endpoint settings.
}
||||
|
||||
// SummaryNetworkSettings provides a summary of container's networks
// in /containers/json
type SummaryNetworkSettings struct {
	Networks map[string]*network.EndpointSettings
}
||||
|
||||
// NetworkSettingsBase holds basic information about networks
type NetworkSettingsBase struct {
	Bridge                 string            // Bridge is the Bridge name the network uses(e.g. `docker0`)
	SandboxID              string            // SandboxID uniquely represents a container's network stack
	HairpinMode            bool              // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface
	LinkLocalIPv6Address   string            // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix
	LinkLocalIPv6PrefixLen int               // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address
	Ports                  nat.PortMap       // Ports is a collection of PortBinding indexed by Port
	SandboxKey             string            // SandboxKey identifies the sandbox
	SecondaryIPAddresses   []network.Address
	SecondaryIPv6Addresses []network.Address
}
||||
|
||||
// DefaultNetworkSettings holds network information
// during the 2 release deprecation period.
// It will be removed in Docker 1.11.
type DefaultNetworkSettings struct {
	EndpointID          string // EndpointID uniquely represents a service endpoint in a Sandbox
	Gateway             string // Gateway holds the gateway address for the network
	GlobalIPv6Address   string // GlobalIPv6Address holds network's global IPv6 address
	GlobalIPv6PrefixLen int    // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address
	IPAddress           string // IPAddress holds the IPv4 address for the network
	IPPrefixLen         int    // IPPrefixLen represents mask length of network's IPv4 address
	IPv6Gateway         string // IPv6Gateway holds gateway address specific for IPv6
	MacAddress          string // MacAddress holds the MAC address for the network
}
||||
|
||||
// MountPoint represents a mount point configuration inside the container.
// This is used for reporting the mountpoints in use by a container.
type MountPoint struct {
	Type        mount.Type `json:",omitempty"`
	Name        string     `json:",omitempty"`
	Source      string     // Source is the host-side path or volume source of the mount.
	Destination string     // Destination is the path inside the container.
	Driver      string     `json:",omitempty"`
	Mode        string
	RW          bool // RW reports whether the mount is writable.
	Propagation mount.Propagation
}
||||
|
||||
// NetworkResource is the body of the "get network" http response message
type NetworkResource struct {
	Name       string                         // Name is the requested name of the network
	ID         string                         `json:"Id"` // ID uniquely identifies a network on a single machine
	Created    time.Time                      // Created is the time the network created
	Scope      string                         // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level)
	Driver     string                         // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`)
	EnableIPv6 bool                           // EnableIPv6 represents whether to enable IPv6
	IPAM       network.IPAM                   // IPAM is the network's IP Address Management
	Internal   bool                           // Internal represents if the network is used internal only
	Attachable bool                           // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode.
	Ingress    bool                           // Ingress indicates the network is providing the routing-mesh for the swarm cluster.
	ConfigFrom network.ConfigReference        // ConfigFrom specifies the source which will provide the configuration for this network.
	ConfigOnly bool                           // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services.
	Containers map[string]EndpointResource    // Containers contains endpoints belonging to the network
	Options    map[string]string              // Options holds the network specific options to use for when creating the network
	Labels     map[string]string              // Labels holds metadata specific to the network being created
	Peers      []network.PeerInfo             `json:",omitempty"` // List of peer nodes for an overlay network
	Services   map[string]network.ServiceInfo `json:",omitempty"`
}
||||
|
||||
// EndpointResource contains network resources allocated and used for a container in a network
type EndpointResource struct {
	Name        string
	EndpointID  string
	MacAddress  string
	IPv4Address string
	IPv6Address string
}
||||
|
||||
// NetworkCreate is the expected body of the "create network" http request message
type NetworkCreate struct {
	// Check for networks with duplicate names.
	// Network is primarily keyed based on a random ID and not on the name.
	// Network name is strictly a user-friendly alias to the network
	// which is uniquely identified using ID.
	// And there is no guaranteed way to check for duplicates.
	// Option CheckDuplicate is there to provide a best effort checking of any networks
	// which has the same name but it is not guaranteed to catch all name collisions.
	CheckDuplicate bool
	Driver         string
	Scope          string
	EnableIPv6     bool
	IPAM           *network.IPAM
	Internal       bool
	Attachable     bool
	Ingress        bool
	ConfigOnly     bool
	ConfigFrom     *network.ConfigReference
	Options        map[string]string
	Labels         map[string]string
}
||||
|
||||
// NetworkCreateRequest is the request message sent to the server for network create call.
type NetworkCreateRequest struct {
	NetworkCreate
	Name string // Name is the requested name of the network being created.
}
||||
|
||||
// NetworkCreateResponse is the response message sent by the server for network create call
type NetworkCreateResponse struct {
	ID      string `json:"Id"`
	Warning string
}
||||
|
||||
// NetworkConnect represents the data to be used to connect a container to the network
type NetworkConnect struct {
	Container      string                    // Container is the container to connect (ID or name).
	EndpointConfig *network.EndpointSettings `json:",omitempty"`
}
||||
|
||||
// NetworkDisconnect represents the data to be used to disconnect a container from the network
type NetworkDisconnect struct {
	Container string // Container is the container to disconnect (ID or name).
	Force     bool   // Force disconnects even if the container is not cleanly detachable.
}
||||
|
||||
// NetworkInspectOptions holds parameters to inspect network
type NetworkInspectOptions struct {
	Scope   string
	Verbose bool
}
||||
|
||||
// Checkpoint represents the details of a checkpoint
type Checkpoint struct {
	Name string // Name is the name of the checkpoint
}
||||
|
||||
// Runtime describes an OCI runtime
type Runtime struct {
	Path string   `json:"path"`                  // Path is the absolute path to the runtime binary.
	Args []string `json:"runtimeArgs,omitempty"` // Args are additional arguments passed to the runtime.
}
||||
|
||||
// DiskUsage contains response of Engine API:
// GET "/system/df"
type DiskUsage struct {
	LayersSize int64
	Images     []*ImageSummary
	Containers []*Container
	Volumes    []*Volume
	BuildCache []*BuildCache
	// Deprecated: kept for backward compatibility of the API response.
	BuilderSize int64
}
||||
|
||||
// ContainersPruneReport contains the response for Engine API:
// POST "/containers/prune"
type ContainersPruneReport struct {
	ContainersDeleted []string // ContainersDeleted lists IDs of the removed containers.
	SpaceReclaimed    uint64   // SpaceReclaimed is the freed disk space in bytes.
}
||||
|
||||
// VolumesPruneReport contains the response for Engine API:
// POST "/volumes/prune"
type VolumesPruneReport struct {
	VolumesDeleted []string // VolumesDeleted lists names of the removed volumes.
	SpaceReclaimed uint64   // SpaceReclaimed is the freed disk space in bytes.
}
||||
|
||||
// ImagesPruneReport contains the response for Engine API:
// POST "/images/prune"
type ImagesPruneReport struct {
	ImagesDeleted  []ImageDeleteResponseItem // ImagesDeleted lists the removed/untagged images.
	SpaceReclaimed uint64                    // SpaceReclaimed is the freed disk space in bytes.
}
||||
|
||||
// BuildCachePruneReport contains the response for Engine API:
// POST "/build/prune"
type BuildCachePruneReport struct {
	CachesDeleted  []string // CachesDeleted lists IDs of the removed build-cache records.
	SpaceReclaimed uint64   // SpaceReclaimed is the freed disk space in bytes.
}
||||
|
||||
// NetworksPruneReport contains the response for Engine API:
// POST "/networks/prune"
type NetworksPruneReport struct {
	NetworksDeleted []string // NetworksDeleted lists names of the removed networks.
}
||||
|
||||
// SecretCreateResponse contains the information returned to a client
// on the creation of a new secret.
type SecretCreateResponse struct {
	// ID is the id of the created secret.
	ID string
}
||||
|
||||
// SecretListOptions holds parameters to list secrets
type SecretListOptions struct {
	Filters filters.Args // Filters restricts which secrets are returned.
}
||||
|
||||
// ConfigCreateResponse contains the information returned to a client
// on the creation of a new config.
type ConfigCreateResponse struct {
	// ID is the id of the created config.
	ID string
}
||||
|
||||
// ConfigListOptions holds parameters to list configs
type ConfigListOptions struct {
	Filters filters.Args // Filters restricts which configs are returned.
}
||||
|
||||
// PushResult contains the tag, manifest digest, and manifest size from the
// push. It's used to signal this information to the trust code in the client
// so it can sign the manifest if necessary.
type PushResult struct {
	Tag    string
	Digest string
	Size   int
}
||||
|
||||
// BuildResult contains the image id of a successful build
type BuildResult struct {
	ID string
}
||||
|
||||
// BuildCache contains information about a build cache record
type BuildCache struct {
	ID          string
	Parent      string // Parent is the ID of the parent cache record, if any.
	Type        string
	Description string
	InUse       bool
	Shared      bool
	Size        int64      // Size of the cache record in bytes.
	CreatedAt   time.Time
	LastUsedAt  *time.Time // LastUsedAt is nil when the record has never been used.
	UsageCount  int
}
||||
|
||||
// BuildCachePruneOptions hold parameters to prune the build cache
type BuildCachePruneOptions struct {
	All         bool         // All prunes shared/in-use records too, not only dangling ones — TODO confirm against daemon behavior.
	KeepStorage int64        // KeepStorage is the amount of cache (in bytes) to retain.
	Filters     filters.Args // Filters restricts which cache records are pruned.
}
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in new issue