author | Wim <wim@42.be> | 2018-03-04 23:46:13 +0100
committer | Wim <wim@42.be> | 2018-03-04 23:46:13 +0100
commit | 25a72113b122f984c904b24c4af23a1cba1eff45 (patch)
tree | f0fb7067d7c958d60ac964afa5b8d5fb79ebc339 /vendor/github.com/pkg/sftp
parent | 79c4ad5015bd2be47b32141c6d53f0d128bf865b (diff)
download | matterbridge-msglm-25a72113b122f984c904b24c4af23a1cba1eff45.tar.gz matterbridge-msglm-25a72113b122f984c904b24c4af23a1cba1eff45.tar.bz2 matterbridge-msglm-25a72113b122f984c904b24c4af23a1cba1eff45.zip
Add vendor files for spf13/viper
Diffstat (limited to 'vendor/github.com/pkg/sftp')
35 files changed, 5908 insertions, 0 deletions
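The files below vendor the github.com/pkg/sftp client. As a quick orientation before the listing, here is a minimal usage sketch assembled from the patterns in the bundled example programs; the address, username, and password are placeholders for illustration only, not values taken from this commit.

```go
// Minimal sketch of driving the vendored github.com/pkg/sftp client,
// following the bundled example programs. Host and credentials below
// are placeholders, not anything from this commit.
package main

import (
	"log"

	"github.com/pkg/sftp"
	"golang.org/x/crypto/ssh"
)

func main() {
	config := &ssh.ClientConfig{
		User:            "testuser",                              // placeholder
		Auth:            []ssh.AuthMethod{ssh.Password("tiger")}, // placeholder
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),             // not for production use
	}
	conn, err := ssh.Dial("tcp", "localhost:22", config)
	if err != nil {
		log.Fatalf("unable to connect: %v", err)
	}
	defer conn.Close()

	// Start an SFTP session on the SSH connection; MaxPacket is optional.
	c, err := sftp.NewClient(conn, sftp.MaxPacket(1<<15))
	if err != nil {
		log.Fatalf("unable to start sftp subsystem: %v", err)
	}
	defer c.Close()

	// List a remote directory via the client.
	entries, err := c.ReadDir("/tmp")
	if err != nil {
		log.Fatal(err)
	}
	for _, fi := range entries {
		log.Println(fi.Name(), fi.Size())
	}
}
```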
diff --git a/vendor/github.com/pkg/sftp/LICENSE b/vendor/github.com/pkg/sftp/LICENSE new file mode 100644 index 00000000..b7b53921 --- /dev/null +++ b/vendor/github.com/pkg/sftp/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) 2013, Dave Cheney +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pkg/sftp/attrs.go b/vendor/github.com/pkg/sftp/attrs.go new file mode 100644 index 00000000..18412e33 --- /dev/null +++ b/vendor/github.com/pkg/sftp/attrs.go @@ -0,0 +1,241 @@ +package sftp + +// ssh_FXP_ATTRS support +// see http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-5 + +import ( + "os" + "syscall" + "time" +) + +const ( + ssh_FILEXFER_ATTR_SIZE = 0x00000001 + ssh_FILEXFER_ATTR_UIDGID = 0x00000002 + ssh_FILEXFER_ATTR_PERMISSIONS = 0x00000004 + ssh_FILEXFER_ATTR_ACMODTIME = 0x00000008 + ssh_FILEXFER_ATTR_EXTENDED = 0x80000000 +) + +// fileInfo is an artificial type designed to satisfy os.FileInfo. +type fileInfo struct { + name string + size int64 + mode os.FileMode + mtime time.Time + sys interface{} +} + +// Name returns the base name of the file. +func (fi *fileInfo) Name() string { return fi.name } + +// Size returns the length in bytes for regular files; system-dependent for others. +func (fi *fileInfo) Size() int64 { return fi.size } + +// Mode returns file mode bits. +func (fi *fileInfo) Mode() os.FileMode { return fi.mode } + +// ModTime returns the last modification time of the file. +func (fi *fileInfo) ModTime() time.Time { return fi.mtime } + +// IsDir returns true if the file is a directory. +func (fi *fileInfo) IsDir() bool { return fi.Mode().IsDir() } + +func (fi *fileInfo) Sys() interface{} { return fi.sys } + +// FileStat holds the original unmarshalled values from a call to READDIR or *STAT. +// It is exported for the purposes of accessing the raw values via os.FileInfo.Sys() +type FileStat struct { + Size uint64 + Mode uint32 + Mtime uint32 + Atime uint32 + UID uint32 + GID uint32 + Extended []StatExtended +} + +// StatExtended contains additional, extended information for a FileStat. 
+type StatExtended struct { + ExtType string + ExtData string +} + +func fileInfoFromStat(st *FileStat, name string) os.FileInfo { + fs := &fileInfo{ + name: name, + size: int64(st.Size), + mode: toFileMode(st.Mode), + mtime: time.Unix(int64(st.Mtime), 0), + sys: st, + } + return fs +} + +func fileStatFromInfo(fi os.FileInfo) (uint32, FileStat) { + mtime := fi.ModTime().Unix() + atime := mtime + var flags uint32 = ssh_FILEXFER_ATTR_SIZE | + ssh_FILEXFER_ATTR_PERMISSIONS | + ssh_FILEXFER_ATTR_ACMODTIME + + fileStat := FileStat{ + Size: uint64(fi.Size()), + Mode: fromFileMode(fi.Mode()), + Mtime: uint32(mtime), + Atime: uint32(atime), + } + + // os specific file stat decoding + fileStatFromInfoOs(fi, &flags, &fileStat) + + return flags, fileStat +} + +func unmarshalAttrs(b []byte) (*FileStat, []byte) { + flags, b := unmarshalUint32(b) + return getFileStat(flags, b) +} + +func getFileStat(flags uint32, b []byte) (*FileStat, []byte) { + var fs FileStat + if flags&ssh_FILEXFER_ATTR_SIZE == ssh_FILEXFER_ATTR_SIZE { + fs.Size, b = unmarshalUint64(b) + } + if flags&ssh_FILEXFER_ATTR_UIDGID == ssh_FILEXFER_ATTR_UIDGID { + fs.UID, b = unmarshalUint32(b) + } + if flags&ssh_FILEXFER_ATTR_UIDGID == ssh_FILEXFER_ATTR_UIDGID { + fs.GID, b = unmarshalUint32(b) + } + if flags&ssh_FILEXFER_ATTR_PERMISSIONS == ssh_FILEXFER_ATTR_PERMISSIONS { + fs.Mode, b = unmarshalUint32(b) + } + if flags&ssh_FILEXFER_ATTR_ACMODTIME == ssh_FILEXFER_ATTR_ACMODTIME { + fs.Atime, b = unmarshalUint32(b) + fs.Mtime, b = unmarshalUint32(b) + } + if flags&ssh_FILEXFER_ATTR_EXTENDED == ssh_FILEXFER_ATTR_EXTENDED { + var count uint32 + count, b = unmarshalUint32(b) + ext := make([]StatExtended, count) + for i := uint32(0); i < count; i++ { + var typ string + var data string + typ, b = unmarshalString(b) + data, b = unmarshalString(b) + ext[i] = StatExtended{typ, data} + } + fs.Extended = ext + } + return &fs, b +} + +func marshalFileInfo(b []byte, fi os.FileInfo) []byte { + // attributes variable struct, and also variable per protocol version + // spec version 3 attributes: + // uint32 flags + // uint64 size present only if flag SSH_FILEXFER_ATTR_SIZE + // uint32 uid present only if flag SSH_FILEXFER_ATTR_UIDGID + // uint32 gid present only if flag SSH_FILEXFER_ATTR_UIDGID + // uint32 permissions present only if flag SSH_FILEXFER_ATTR_PERMISSIONS + // uint32 atime present only if flag SSH_FILEXFER_ACMODTIME + // uint32 mtime present only if flag SSH_FILEXFER_ACMODTIME + // uint32 extended_count present only if flag SSH_FILEXFER_ATTR_EXTENDED + // string extended_type + // string extended_data + // ... 
more extended data (extended_type - extended_data pairs), + // so that number of pairs equals extended_count + + flags, fileStat := fileStatFromInfo(fi) + + b = marshalUint32(b, flags) + if flags&ssh_FILEXFER_ATTR_SIZE != 0 { + b = marshalUint64(b, fileStat.Size) + } + if flags&ssh_FILEXFER_ATTR_UIDGID != 0 { + b = marshalUint32(b, fileStat.UID) + b = marshalUint32(b, fileStat.GID) + } + if flags&ssh_FILEXFER_ATTR_PERMISSIONS != 0 { + b = marshalUint32(b, fileStat.Mode) + } + if flags&ssh_FILEXFER_ATTR_ACMODTIME != 0 { + b = marshalUint32(b, fileStat.Atime) + b = marshalUint32(b, fileStat.Mtime) + } + + return b +} + +// toFileMode converts sftp filemode bits to the os.FileMode specification +func toFileMode(mode uint32) os.FileMode { + var fm = os.FileMode(mode & 0777) + switch mode & syscall.S_IFMT { + case syscall.S_IFBLK: + fm |= os.ModeDevice + case syscall.S_IFCHR: + fm |= os.ModeDevice | os.ModeCharDevice + case syscall.S_IFDIR: + fm |= os.ModeDir + case syscall.S_IFIFO: + fm |= os.ModeNamedPipe + case syscall.S_IFLNK: + fm |= os.ModeSymlink + case syscall.S_IFREG: + // nothing to do + case syscall.S_IFSOCK: + fm |= os.ModeSocket + } + if mode&syscall.S_ISGID != 0 { + fm |= os.ModeSetgid + } + if mode&syscall.S_ISUID != 0 { + fm |= os.ModeSetuid + } + if mode&syscall.S_ISVTX != 0 { + fm |= os.ModeSticky + } + return fm +} + +// fromFileMode converts from the os.FileMode specification to sftp filemode bits +func fromFileMode(mode os.FileMode) uint32 { + ret := uint32(0) + + if mode&os.ModeDevice != 0 { + if mode&os.ModeCharDevice != 0 { + ret |= syscall.S_IFCHR + } else { + ret |= syscall.S_IFBLK + } + } + if mode&os.ModeDir != 0 { + ret |= syscall.S_IFDIR + } + if mode&os.ModeSymlink != 0 { + ret |= syscall.S_IFLNK + } + if mode&os.ModeNamedPipe != 0 { + ret |= syscall.S_IFIFO + } + if mode&os.ModeSetgid != 0 { + ret |= syscall.S_ISGID + } + if mode&os.ModeSetuid != 0 { + ret |= syscall.S_ISUID + } + if mode&os.ModeSticky != 0 { + ret |= syscall.S_ISVTX + } + if mode&os.ModeSocket != 0 { + ret |= syscall.S_IFSOCK + } + + if mode&os.ModeType == 0 { + ret |= syscall.S_IFREG + } + ret |= uint32(mode & os.ModePerm) + + return ret +} diff --git a/vendor/github.com/pkg/sftp/attrs_stubs.go b/vendor/github.com/pkg/sftp/attrs_stubs.go new file mode 100644 index 00000000..81cf3eac --- /dev/null +++ b/vendor/github.com/pkg/sftp/attrs_stubs.go @@ -0,0 +1,11 @@ +// +build !cgo,!plan9 windows android + +package sftp + +import ( + "os" +) + +func fileStatFromInfoOs(fi os.FileInfo, flags *uint32, fileStat *FileStat) { + // todo +} diff --git a/vendor/github.com/pkg/sftp/attrs_unix.go b/vendor/github.com/pkg/sftp/attrs_unix.go new file mode 100644 index 00000000..ab6ecdea --- /dev/null +++ b/vendor/github.com/pkg/sftp/attrs_unix.go @@ -0,0 +1,17 @@ +// +build darwin dragonfly freebsd !android,linux netbsd openbsd solaris +// +build cgo + +package sftp + +import ( + "os" + "syscall" +) + +func fileStatFromInfoOs(fi os.FileInfo, flags *uint32, fileStat *FileStat) { + if statt, ok := fi.Sys().(*syscall.Stat_t); ok { + *flags |= ssh_FILEXFER_ATTR_UIDGID + fileStat.UID = statt.Uid + fileStat.GID = statt.Gid + } +} diff --git a/vendor/github.com/pkg/sftp/client.go b/vendor/github.com/pkg/sftp/client.go new file mode 100644 index 00000000..c90d0b23 --- /dev/null +++ b/vendor/github.com/pkg/sftp/client.go @@ -0,0 +1,1174 @@ +package sftp + +import ( + "bytes" + "encoding/binary" + "io" + "os" + "path" + "sync/atomic" + "time" + + "github.com/kr/fs" + "github.com/pkg/errors" + "golang.org/x/crypto/ssh" +) + 
+// InternalInconsistency indicates the packets sent and the data queued to be +// written to the file don't match up. It is an unusual error and usually is +// caused by bad behavior server side or connection issues. The error is +// limited in scope to the call where it happened, the client object is still +// OK to use as long as the connection is still open. +var InternalInconsistency = errors.New("internal inconsistency") + +// A ClientOption is a function which applies configuration to a Client. +type ClientOption func(*Client) error + +// This is based on Openssh's max accepted size of 1<<18 - overhead +const maxMaxPacket = (1 << 18) - 1024 + +// MaxPacket sets the maximum size of the payload. The size param must be +// between 32768 (1<<15) and 261120 ((1 << 18) - 1024). The minimum size is +// given by the RFC, while the maximum size is a de-facto standard based on +// Openssh's SFTP server which won't accept packets much larger than that. +// +// Note if you aren't using Openssh's sftp server and get the error "failed to +// send packet header: EOF" when copying a large file try lowering this number. +func MaxPacket(size int) ClientOption { + return func(c *Client) error { + if size < 1<<15 { + return errors.Errorf("size must be greater or equal to 32k") + } + if size > maxMaxPacket { + return errors.Errorf("max packet size is too large (see docs)") + } + c.maxPacket = size + return nil + } +} + +// NewClient creates a new SFTP client on conn, using zero or more option +// functions. +func NewClient(conn *ssh.Client, opts ...ClientOption) (*Client, error) { + s, err := conn.NewSession() + if err != nil { + return nil, err + } + if err := s.RequestSubsystem("sftp"); err != nil { + return nil, err + } + pw, err := s.StdinPipe() + if err != nil { + return nil, err + } + pr, err := s.StdoutPipe() + if err != nil { + return nil, err + } + + return NewClientPipe(pr, pw, opts...) +} + +// NewClientPipe creates a new SFTP client given a Reader and a WriteCloser. +// This can be used for connecting to an SFTP server over TCP/TLS or by using +// the system's ssh client program (e.g. via exec.Command). +func NewClientPipe(rd io.Reader, wr io.WriteCloser, opts ...ClientOption) (*Client, error) { + sftp := &Client{ + clientConn: clientConn{ + conn: conn{ + Reader: rd, + WriteCloser: wr, + }, + inflight: make(map[uint32]chan<- result), + }, + maxPacket: 1 << 15, + } + if err := sftp.applyOptions(opts...); err != nil { + wr.Close() + return nil, err + } + if err := sftp.sendInit(); err != nil { + wr.Close() + return nil, err + } + if err := sftp.recvVersion(); err != nil { + wr.Close() + return nil, err + } + sftp.clientConn.wg.Add(1) + go sftp.loop() + return sftp, nil +} + +// Client represents an SFTP session on a *ssh.ClientConn SSH connection. +// Multiple Clients can be active on a single SSH connection, and a Client +// may be called concurrently from multiple Goroutines. +// +// Client implements the github.com/kr/fs.FileSystem interface. +type Client struct { + clientConn + + maxPacket int // max packet size read or written. + nextid uint32 +} + +// Create creates the named file mode 0666 (before umask), truncating it if it +// already exists. If successful, methods on the returned File can be used for +// I/O; the associated file descriptor has mode O_RDWR. If you need more +// control over the flags/mode used to open the file see client.OpenFile. 
+func (c *Client) Create(path string) (*File, error) { + return c.open(path, flags(os.O_RDWR|os.O_CREATE|os.O_TRUNC)) +} + +const sftpProtocolVersion = 3 // http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02 + +func (c *Client) sendInit() error { + return c.clientConn.conn.sendPacket(sshFxInitPacket{ + Version: sftpProtocolVersion, // http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02 + }) +} + +// returns the next value of c.nextid +func (c *Client) nextID() uint32 { + return atomic.AddUint32(&c.nextid, 1) +} + +func (c *Client) recvVersion() error { + typ, data, err := c.recvPacket() + if err != nil { + return err + } + if typ != ssh_FXP_VERSION { + return &unexpectedPacketErr{ssh_FXP_VERSION, typ} + } + + version, _ := unmarshalUint32(data) + if version != sftpProtocolVersion { + return &unexpectedVersionErr{sftpProtocolVersion, version} + } + + return nil +} + +// Walk returns a new Walker rooted at root. +func (c *Client) Walk(root string) *fs.Walker { + return fs.WalkFS(root, c) +} + +// ReadDir reads the directory named by dirname and returns a list of +// directory entries. +func (c *Client) ReadDir(p string) ([]os.FileInfo, error) { + handle, err := c.opendir(p) + if err != nil { + return nil, err + } + defer c.close(handle) // this has to defer earlier than the lock below + var attrs []os.FileInfo + var done = false + for !done { + id := c.nextID() + typ, data, err1 := c.sendPacket(sshFxpReaddirPacket{ + ID: id, + Handle: handle, + }) + if err1 != nil { + err = err1 + done = true + break + } + switch typ { + case ssh_FXP_NAME: + sid, data := unmarshalUint32(data) + if sid != id { + return nil, &unexpectedIDErr{id, sid} + } + count, data := unmarshalUint32(data) + for i := uint32(0); i < count; i++ { + var filename string + filename, data = unmarshalString(data) + _, data = unmarshalString(data) // discard longname + var attr *FileStat + attr, data = unmarshalAttrs(data) + if filename == "." || filename == ".." { + continue + } + attrs = append(attrs, fileInfoFromStat(attr, path.Base(filename))) + } + case ssh_FXP_STATUS: + // TODO(dfc) scope warning! + err = normaliseError(unmarshalStatus(id, data)) + done = true + default: + return nil, unimplementedPacketErr(typ) + } + } + if err == io.EOF { + err = nil + } + return attrs, err +} + +func (c *Client) opendir(path string) (string, error) { + id := c.nextID() + typ, data, err := c.sendPacket(sshFxpOpendirPacket{ + ID: id, + Path: path, + }) + if err != nil { + return "", err + } + switch typ { + case ssh_FXP_HANDLE: + sid, data := unmarshalUint32(data) + if sid != id { + return "", &unexpectedIDErr{id, sid} + } + handle, _ := unmarshalString(data) + return handle, nil + case ssh_FXP_STATUS: + return "", normaliseError(unmarshalStatus(id, data)) + default: + return "", unimplementedPacketErr(typ) + } +} + +// Stat returns a FileInfo structure describing the file specified by path 'p'. +// If 'p' is a symbolic link, the returned FileInfo structure describes the referent file. 
+func (c *Client) Stat(p string) (os.FileInfo, error) { + id := c.nextID() + typ, data, err := c.sendPacket(sshFxpStatPacket{ + ID: id, + Path: p, + }) + if err != nil { + return nil, err + } + switch typ { + case ssh_FXP_ATTRS: + sid, data := unmarshalUint32(data) + if sid != id { + return nil, &unexpectedIDErr{id, sid} + } + attr, _ := unmarshalAttrs(data) + return fileInfoFromStat(attr, path.Base(p)), nil + case ssh_FXP_STATUS: + return nil, normaliseError(unmarshalStatus(id, data)) + default: + return nil, unimplementedPacketErr(typ) + } +} + +// Lstat returns a FileInfo structure describing the file specified by path 'p'. +// If 'p' is a symbolic link, the returned FileInfo structure describes the symbolic link. +func (c *Client) Lstat(p string) (os.FileInfo, error) { + id := c.nextID() + typ, data, err := c.sendPacket(sshFxpLstatPacket{ + ID: id, + Path: p, + }) + if err != nil { + return nil, err + } + switch typ { + case ssh_FXP_ATTRS: + sid, data := unmarshalUint32(data) + if sid != id { + return nil, &unexpectedIDErr{id, sid} + } + attr, _ := unmarshalAttrs(data) + return fileInfoFromStat(attr, path.Base(p)), nil + case ssh_FXP_STATUS: + return nil, normaliseError(unmarshalStatus(id, data)) + default: + return nil, unimplementedPacketErr(typ) + } +} + +// ReadLink reads the target of a symbolic link. +func (c *Client) ReadLink(p string) (string, error) { + id := c.nextID() + typ, data, err := c.sendPacket(sshFxpReadlinkPacket{ + ID: id, + Path: p, + }) + if err != nil { + return "", err + } + switch typ { + case ssh_FXP_NAME: + sid, data := unmarshalUint32(data) + if sid != id { + return "", &unexpectedIDErr{id, sid} + } + count, data := unmarshalUint32(data) + if count != 1 { + return "", unexpectedCount(1, count) + } + filename, _ := unmarshalString(data) // ignore dummy attributes + return filename, nil + case ssh_FXP_STATUS: + return "", normaliseError(unmarshalStatus(id, data)) + default: + return "", unimplementedPacketErr(typ) + } +} + +// Symlink creates a symbolic link at 'newname', pointing at target 'oldname' +func (c *Client) Symlink(oldname, newname string) error { + id := c.nextID() + typ, data, err := c.sendPacket(sshFxpSymlinkPacket{ + ID: id, + Linkpath: newname, + Targetpath: oldname, + }) + if err != nil { + return err + } + switch typ { + case ssh_FXP_STATUS: + return normaliseError(unmarshalStatus(id, data)) + default: + return unimplementedPacketErr(typ) + } +} + +// setstat is a convience wrapper to allow for changing of various parts of the file descriptor. +func (c *Client) setstat(path string, flags uint32, attrs interface{}) error { + id := c.nextID() + typ, data, err := c.sendPacket(sshFxpSetstatPacket{ + ID: id, + Path: path, + Flags: flags, + Attrs: attrs, + }) + if err != nil { + return err + } + switch typ { + case ssh_FXP_STATUS: + return normaliseError(unmarshalStatus(id, data)) + default: + return unimplementedPacketErr(typ) + } +} + +// Chtimes changes the access and modification times of the named file. +func (c *Client) Chtimes(path string, atime time.Time, mtime time.Time) error { + type times struct { + Atime uint32 + Mtime uint32 + } + attrs := times{uint32(atime.Unix()), uint32(mtime.Unix())} + return c.setstat(path, ssh_FILEXFER_ATTR_ACMODTIME, attrs) +} + +// Chown changes the user and group owners of the named file. 
+func (c *Client) Chown(path string, uid, gid int) error { + type owner struct { + UID uint32 + GID uint32 + } + attrs := owner{uint32(uid), uint32(gid)} + return c.setstat(path, ssh_FILEXFER_ATTR_UIDGID, attrs) +} + +// Chmod changes the permissions of the named file. +func (c *Client) Chmod(path string, mode os.FileMode) error { + return c.setstat(path, ssh_FILEXFER_ATTR_PERMISSIONS, uint32(mode)) +} + +// Truncate sets the size of the named file. Although it may be safely assumed +// that if the size is less than its current size it will be truncated to fit, +// the SFTP protocol does not specify what behavior the server should do when setting +// size greater than the current size. +func (c *Client) Truncate(path string, size int64) error { + return c.setstat(path, ssh_FILEXFER_ATTR_SIZE, uint64(size)) +} + +// Open opens the named file for reading. If successful, methods on the +// returned file can be used for reading; the associated file descriptor +// has mode O_RDONLY. +func (c *Client) Open(path string) (*File, error) { + return c.open(path, flags(os.O_RDONLY)) +} + +// OpenFile is the generalized open call; most users will use Open or +// Create instead. It opens the named file with specified flag (O_RDONLY +// etc.). If successful, methods on the returned File can be used for I/O. +func (c *Client) OpenFile(path string, f int) (*File, error) { + return c.open(path, flags(f)) +} + +func (c *Client) open(path string, pflags uint32) (*File, error) { + id := c.nextID() + typ, data, err := c.sendPacket(sshFxpOpenPacket{ + ID: id, + Path: path, + Pflags: pflags, + }) + if err != nil { + return nil, err + } + switch typ { + case ssh_FXP_HANDLE: + sid, data := unmarshalUint32(data) + if sid != id { + return nil, &unexpectedIDErr{id, sid} + } + handle, _ := unmarshalString(data) + return &File{c: c, path: path, handle: handle}, nil + case ssh_FXP_STATUS: + return nil, normaliseError(unmarshalStatus(id, data)) + default: + return nil, unimplementedPacketErr(typ) + } +} + +// close closes a handle handle previously returned in the response +// to SSH_FXP_OPEN or SSH_FXP_OPENDIR. The handle becomes invalid +// immediately after this request has been sent. +func (c *Client) close(handle string) error { + id := c.nextID() + typ, data, err := c.sendPacket(sshFxpClosePacket{ + ID: id, + Handle: handle, + }) + if err != nil { + return err + } + switch typ { + case ssh_FXP_STATUS: + return normaliseError(unmarshalStatus(id, data)) + default: + return unimplementedPacketErr(typ) + } +} + +func (c *Client) fstat(handle string) (*FileStat, error) { + id := c.nextID() + typ, data, err := c.sendPacket(sshFxpFstatPacket{ + ID: id, + Handle: handle, + }) + if err != nil { + return nil, err + } + switch typ { + case ssh_FXP_ATTRS: + sid, data := unmarshalUint32(data) + if sid != id { + return nil, &unexpectedIDErr{id, sid} + } + attr, _ := unmarshalAttrs(data) + return attr, nil + case ssh_FXP_STATUS: + return nil, normaliseError(unmarshalStatus(id, data)) + default: + return nil, unimplementedPacketErr(typ) + } +} + +// StatVFS retrieves VFS statistics from a remote host. +// +// It implements the statvfs@openssh.com SSH_FXP_EXTENDED feature +// from http://www.opensource.apple.com/source/OpenSSH/OpenSSH-175/openssh/PROTOCOL?txt. 
+func (c *Client) StatVFS(path string) (*StatVFS, error) { + // send the StatVFS packet to the server + id := c.nextID() + typ, data, err := c.sendPacket(sshFxpStatvfsPacket{ + ID: id, + Path: path, + }) + if err != nil { + return nil, err + } + + switch typ { + // server responded with valid data + case ssh_FXP_EXTENDED_REPLY: + var response StatVFS + err = binary.Read(bytes.NewReader(data), binary.BigEndian, &response) + if err != nil { + return nil, errors.New("can not parse reply") + } + + return &response, nil + + // the resquest failed + case ssh_FXP_STATUS: + return nil, errors.New(fxp(ssh_FXP_STATUS).String()) + + default: + return nil, unimplementedPacketErr(typ) + } +} + +// Join joins any number of path elements into a single path, adding a +// separating slash if necessary. The result is Cleaned; in particular, all +// empty strings are ignored. +func (c *Client) Join(elem ...string) string { return path.Join(elem...) } + +// Remove removes the specified file or directory. An error will be returned if no +// file or directory with the specified path exists, or if the specified directory +// is not empty. +func (c *Client) Remove(path string) error { + err := c.removeFile(path) + if err, ok := err.(*StatusError); ok { + switch err.Code { + // some servers, *cough* osx *cough*, return EPERM, not ENODIR. + // serv-u returns ssh_FX_FILE_IS_A_DIRECTORY + case ssh_FX_PERMISSION_DENIED, ssh_FX_FAILURE, ssh_FX_FILE_IS_A_DIRECTORY: + return c.RemoveDirectory(path) + } + } + return err +} + +func (c *Client) removeFile(path string) error { + id := c.nextID() + typ, data, err := c.sendPacket(sshFxpRemovePacket{ + ID: id, + Filename: path, + }) + if err != nil { + return err + } + switch typ { + case ssh_FXP_STATUS: + return normaliseError(unmarshalStatus(id, data)) + default: + return unimplementedPacketErr(typ) + } +} + +// RemoveDirectory removes a directory path. +func (c *Client) RemoveDirectory(path string) error { + id := c.nextID() + typ, data, err := c.sendPacket(sshFxpRmdirPacket{ + ID: id, + Path: path, + }) + if err != nil { + return err + } + switch typ { + case ssh_FXP_STATUS: + return normaliseError(unmarshalStatus(id, data)) + default: + return unimplementedPacketErr(typ) + } +} + +// Rename renames a file. +func (c *Client) Rename(oldname, newname string) error { + id := c.nextID() + typ, data, err := c.sendPacket(sshFxpRenamePacket{ + ID: id, + Oldpath: oldname, + Newpath: newname, + }) + if err != nil { + return err + } + switch typ { + case ssh_FXP_STATUS: + return normaliseError(unmarshalStatus(id, data)) + default: + return unimplementedPacketErr(typ) + } +} + +// PosixRename renames a file using the posix-rename@openssh.com extension +// which will replace newname if it already exists. 
+func (c *Client) PosixRename(oldname, newname string) error { + id := c.nextID() + typ, data, err := c.sendPacket(sshFxpPosixRenamePacket{ + ID: id, + Oldpath: oldname, + Newpath: newname, + }) + if err != nil { + return err + } + switch typ { + case ssh_FXP_STATUS: + return normaliseError(unmarshalStatus(id, data)) + default: + return unimplementedPacketErr(typ) + } +} + +func (c *Client) realpath(path string) (string, error) { + id := c.nextID() + typ, data, err := c.sendPacket(sshFxpRealpathPacket{ + ID: id, + Path: path, + }) + if err != nil { + return "", err + } + switch typ { + case ssh_FXP_NAME: + sid, data := unmarshalUint32(data) + if sid != id { + return "", &unexpectedIDErr{id, sid} + } + count, data := unmarshalUint32(data) + if count != 1 { + return "", unexpectedCount(1, count) + } + filename, _ := unmarshalString(data) // ignore attributes + return filename, nil + case ssh_FXP_STATUS: + return "", normaliseError(unmarshalStatus(id, data)) + default: + return "", unimplementedPacketErr(typ) + } +} + +// Getwd returns the current working directory of the server. Operations +// involving relative paths will be based at this location. +func (c *Client) Getwd() (string, error) { + return c.realpath(".") +} + +// Mkdir creates the specified directory. An error will be returned if a file or +// directory with the specified path already exists, or if the directory's +// parent folder does not exist (the method cannot create complete paths). +func (c *Client) Mkdir(path string) error { + id := c.nextID() + typ, data, err := c.sendPacket(sshFxpMkdirPacket{ + ID: id, + Path: path, + }) + if err != nil { + return err + } + switch typ { + case ssh_FXP_STATUS: + return normaliseError(unmarshalStatus(id, data)) + default: + return unimplementedPacketErr(typ) + } +} + +// applyOptions applies options functions to the Client. +// If an error is encountered, option processing ceases. +func (c *Client) applyOptions(opts ...ClientOption) error { + for _, f := range opts { + if err := f(c); err != nil { + return err + } + } + return nil +} + +// File represents a remote file. +type File struct { + c *Client + path string + handle string + offset uint64 // current offset within remote file +} + +// Close closes the File, rendering it unusable for I/O. It returns an +// error, if any. +func (f *File) Close() error { + return f.c.close(f.handle) +} + +// Name returns the name of the file as presented to Open or Create. +func (f *File) Name() string { + return f.path +} + +const maxConcurrentRequests = 64 + +// Read reads up to len(b) bytes from the File. It returns the number of bytes +// read and an error, if any. Read follows io.Reader semantics, so when Read +// encounters an error or EOF condition after successfully reading n > 0 bytes, +// it returns the number of bytes read. +func (f *File) Read(b []byte) (int, error) { + // Split the read into multiple maxPacket sized concurrent reads + // bounded by maxConcurrentRequests. This allows reads with a suitably + // large buffer to transfer data at a much faster rate due to + // overlapping round trip times. 
+ inFlight := 0 + desiredInFlight := 1 + offset := f.offset + // maxConcurrentRequests buffer to deal with broadcastErr() floods + // also must have a buffer of max value of (desiredInFlight - inFlight) + ch := make(chan result, maxConcurrentRequests) + type inflightRead struct { + b []byte + offset uint64 + } + reqs := map[uint32]inflightRead{} + type offsetErr struct { + offset uint64 + err error + } + var firstErr offsetErr + + sendReq := func(b []byte, offset uint64) { + reqID := f.c.nextID() + f.c.dispatchRequest(ch, sshFxpReadPacket{ + ID: reqID, + Handle: f.handle, + Offset: offset, + Len: uint32(len(b)), + }) + inFlight++ + reqs[reqID] = inflightRead{b: b, offset: offset} + } + + var read int + for len(b) > 0 || inFlight > 0 { + for inFlight < desiredInFlight && len(b) > 0 && firstErr.err == nil { + l := min(len(b), f.c.maxPacket) + rb := b[:l] + sendReq(rb, offset) + offset += uint64(l) + b = b[l:] + } + + if inFlight == 0 { + break + } + res := <-ch + inFlight-- + if res.err != nil { + firstErr = offsetErr{offset: 0, err: res.err} + continue + } + reqID, data := unmarshalUint32(res.data) + req, ok := reqs[reqID] + if !ok { + firstErr = offsetErr{offset: 0, err: errors.Errorf("sid: %v not found", reqID)} + continue + } + delete(reqs, reqID) + switch res.typ { + case ssh_FXP_STATUS: + if firstErr.err == nil || req.offset < firstErr.offset { + firstErr = offsetErr{ + offset: req.offset, + err: normaliseError(unmarshalStatus(reqID, res.data)), + } + } + case ssh_FXP_DATA: + l, data := unmarshalUint32(data) + n := copy(req.b, data[:l]) + read += n + if n < len(req.b) { + sendReq(req.b[l:], req.offset+uint64(l)) + } + if desiredInFlight < maxConcurrentRequests { + desiredInFlight++ + } + default: + firstErr = offsetErr{offset: 0, err: unimplementedPacketErr(res.typ)} + } + } + // If the error is anything other than EOF, then there + // may be gaps in the data copied to the buffer so it's + // best to return 0 so the caller can't make any + // incorrect assumptions about the state of the buffer. + if firstErr.err != nil && firstErr.err != io.EOF { + read = 0 + } + f.offset += uint64(read) + return read, firstErr.err +} + +// WriteTo writes the file to w. The return value is the number of bytes +// written. Any error encountered during the write is also returned. 
+func (f *File) WriteTo(w io.Writer) (int64, error) { + fi, err := f.Stat() + if err != nil { + return 0, err + } + inFlight := 0 + desiredInFlight := 1 + offset := f.offset + writeOffset := offset + fileSize := uint64(fi.Size()) + // see comment on same line in Read() above + ch := make(chan result, maxConcurrentRequests) + type inflightRead struct { + b []byte + offset uint64 + } + reqs := map[uint32]inflightRead{} + pendingWrites := map[uint64][]byte{} + type offsetErr struct { + offset uint64 + err error + } + var firstErr offsetErr + + sendReq := func(b []byte, offset uint64) { + reqID := f.c.nextID() + f.c.dispatchRequest(ch, sshFxpReadPacket{ + ID: reqID, + Handle: f.handle, + Offset: offset, + Len: uint32(len(b)), + }) + inFlight++ + reqs[reqID] = inflightRead{b: b, offset: offset} + } + + var copied int64 + for firstErr.err == nil || inFlight > 0 { + if firstErr.err == nil { + for inFlight+len(pendingWrites) < desiredInFlight { + b := make([]byte, f.c.maxPacket) + sendReq(b, offset) + offset += uint64(f.c.maxPacket) + if offset > fileSize { + desiredInFlight = 1 + } + } + } + + if inFlight == 0 { + if firstErr.err == nil && len(pendingWrites) > 0 { + return copied, InternalInconsistency + } + break + } + res := <-ch + inFlight-- + if res.err != nil { + firstErr = offsetErr{offset: 0, err: res.err} + continue + } + reqID, data := unmarshalUint32(res.data) + req, ok := reqs[reqID] + if !ok { + firstErr = offsetErr{offset: 0, err: errors.Errorf("sid: %v not found", reqID)} + continue + } + delete(reqs, reqID) + switch res.typ { + case ssh_FXP_STATUS: + if firstErr.err == nil || req.offset < firstErr.offset { + firstErr = offsetErr{offset: req.offset, err: normaliseError(unmarshalStatus(reqID, res.data))} + } + case ssh_FXP_DATA: + l, data := unmarshalUint32(data) + if req.offset == writeOffset { + nbytes, err := w.Write(data) + copied += int64(nbytes) + if err != nil { + // We will never receive another DATA with offset==writeOffset, so + // the loop will drain inFlight and then exit. + firstErr = offsetErr{offset: req.offset + uint64(nbytes), err: err} + break + } + if nbytes < int(l) { + firstErr = offsetErr{offset: req.offset + uint64(nbytes), err: io.ErrShortWrite} + break + } + switch { + case offset > fileSize: + desiredInFlight = 1 + case desiredInFlight < maxConcurrentRequests: + desiredInFlight++ + } + writeOffset += uint64(nbytes) + for { + pendingData, ok := pendingWrites[writeOffset] + if !ok { + break + } + // Give go a chance to free the memory. + delete(pendingWrites, writeOffset) + nbytes, err := w.Write(pendingData) + // Do not move writeOffset on error so subsequent iterations won't trigger + // any writes. + if err != nil { + firstErr = offsetErr{offset: writeOffset + uint64(nbytes), err: err} + break + } + if nbytes < len(pendingData) { + firstErr = offsetErr{offset: writeOffset + uint64(nbytes), err: io.ErrShortWrite} + break + } + writeOffset += uint64(nbytes) + } + } else { + // Don't write the data yet because + // this response came in out of order + // and we need to wait for responses + // for earlier segments of the file. + pendingWrites[req.offset] = data + } + default: + firstErr = offsetErr{offset: 0, err: unimplementedPacketErr(res.typ)} + } + } + if firstErr.err != io.EOF { + return copied, firstErr.err + } + return copied, nil +} + +// Stat returns the FileInfo structure describing file. If there is an +// error. 
+func (f *File) Stat() (os.FileInfo, error) { + fs, err := f.c.fstat(f.handle) + if err != nil { + return nil, err + } + return fileInfoFromStat(fs, path.Base(f.path)), nil +} + +// Write writes len(b) bytes to the File. It returns the number of bytes +// written and an error, if any. Write returns a non-nil error when n != +// len(b). +func (f *File) Write(b []byte) (int, error) { + // Split the write into multiple maxPacket sized concurrent writes + // bounded by maxConcurrentRequests. This allows writes with a suitably + // large buffer to transfer data at a much faster rate due to + // overlapping round trip times. + inFlight := 0 + desiredInFlight := 1 + offset := f.offset + // see comment on same line in Read() above + ch := make(chan result, maxConcurrentRequests) + var firstErr error + written := len(b) + for len(b) > 0 || inFlight > 0 { + for inFlight < desiredInFlight && len(b) > 0 && firstErr == nil { + l := min(len(b), f.c.maxPacket) + rb := b[:l] + f.c.dispatchRequest(ch, sshFxpWritePacket{ + ID: f.c.nextID(), + Handle: f.handle, + Offset: offset, + Length: uint32(len(rb)), + Data: rb, + }) + inFlight++ + offset += uint64(l) + b = b[l:] + } + + if inFlight == 0 { + break + } + res := <-ch + inFlight-- + if res.err != nil { + firstErr = res.err + continue + } + switch res.typ { + case ssh_FXP_STATUS: + id, _ := unmarshalUint32(res.data) + err := normaliseError(unmarshalStatus(id, res.data)) + if err != nil && firstErr == nil { + firstErr = err + break + } + if desiredInFlight < maxConcurrentRequests { + desiredInFlight++ + } + default: + firstErr = unimplementedPacketErr(res.typ) + } + } + // If error is non-nil, then there may be gaps in the data written to + // the file so it's best to return 0 so the caller can't make any + // incorrect assumptions about the state of the file. + if firstErr != nil { + written = 0 + } + f.offset += uint64(written) + return written, firstErr +} + +// ReadFrom reads data from r until EOF and writes it to the file. The return +// value is the number of bytes read. Any error except io.EOF encountered +// during the read is also returned. +func (f *File) ReadFrom(r io.Reader) (int64, error) { + inFlight := 0 + desiredInFlight := 1 + offset := f.offset + // see comment on same line in Read() above + ch := make(chan result, maxConcurrentRequests) + var firstErr error + read := int64(0) + b := make([]byte, f.c.maxPacket) + for inFlight > 0 || firstErr == nil { + for inFlight < desiredInFlight && firstErr == nil { + n, err := r.Read(b) + if err != nil { + firstErr = err + } + f.c.dispatchRequest(ch, sshFxpWritePacket{ + ID: f.c.nextID(), + Handle: f.handle, + Offset: offset, + Length: uint32(n), + Data: b[:n], + }) + inFlight++ + offset += uint64(n) + read += int64(n) + } + + if inFlight == 0 { + break + } + res := <-ch + inFlight-- + if res.err != nil { + firstErr = res.err + continue + } + switch res.typ { + case ssh_FXP_STATUS: + id, _ := unmarshalUint32(res.data) + err := normaliseError(unmarshalStatus(id, res.data)) + if err != nil && firstErr == nil { + firstErr = err + break + } + if desiredInFlight < maxConcurrentRequests { + desiredInFlight++ + } + default: + firstErr = unimplementedPacketErr(res.typ) + } + } + if firstErr == io.EOF { + firstErr = nil + } + // If error is non-nil, then there may be gaps in the data written to + // the file so it's best to return 0 so the caller can't make any + // incorrect assumptions about the state of the file. 
+ if firstErr != nil { + read = 0 + } + f.offset += uint64(read) + return read, firstErr +} + +// Seek implements io.Seeker by setting the client offset for the next Read or +// Write. It returns the next offset read. Seeking before or after the end of +// the file is undefined. Seeking relative to the end calls Stat. +func (f *File) Seek(offset int64, whence int) (int64, error) { + switch whence { + case io.SeekStart: + f.offset = uint64(offset) + case io.SeekCurrent: + f.offset = uint64(int64(f.offset) + offset) + case io.SeekEnd: + fi, err := f.Stat() + if err != nil { + return int64(f.offset), err + } + f.offset = uint64(fi.Size() + offset) + default: + return int64(f.offset), unimplementedSeekWhence(whence) + } + return int64(f.offset), nil +} + +// Chown changes the uid/gid of the current file. +func (f *File) Chown(uid, gid int) error { + return f.c.Chown(f.path, uid, gid) +} + +// Chmod changes the permissions of the current file. +func (f *File) Chmod(mode os.FileMode) error { + return f.c.Chmod(f.path, mode) +} + +// Truncate sets the size of the current file. Although it may be safely assumed +// that if the size is less than its current size it will be truncated to fit, +// the SFTP protocol does not specify what behavior the server should do when setting +// size greater than the current size. +func (f *File) Truncate(size int64) error { + return f.c.Truncate(f.path, size) +} + +func min(a, b int) int { + if a > b { + return b + } + return a +} + +// normaliseError normalises an error into a more standard form that can be +// checked against stdlib errors like io.EOF or os.ErrNotExist. +func normaliseError(err error) error { + switch err := err.(type) { + case *StatusError: + switch err.Code { + case ssh_FX_EOF: + return io.EOF + case ssh_FX_NO_SUCH_FILE: + return os.ErrNotExist + case ssh_FX_OK: + return nil + default: + return err + } + default: + return err + } +} + +func unmarshalStatus(id uint32, data []byte) error { + sid, data := unmarshalUint32(data) + if sid != id { + return &unexpectedIDErr{id, sid} + } + code, data := unmarshalUint32(data) + msg, data, _ := unmarshalStringSafe(data) + lang, _, _ := unmarshalStringSafe(data) + return &StatusError{ + Code: code, + msg: msg, + lang: lang, + } +} + +func marshalStatus(b []byte, err StatusError) []byte { + b = marshalUint32(b, err.Code) + b = marshalString(b, err.msg) + b = marshalString(b, err.lang) + return b +} + +// flags converts the flags passed to OpenFile into ssh flags. +// Unsupported flags are ignored. +func flags(f int) uint32 { + var out uint32 + switch f & os.O_WRONLY { + case os.O_WRONLY: + out |= ssh_FXF_WRITE + case os.O_RDONLY: + out |= ssh_FXF_READ + } + if f&os.O_RDWR == os.O_RDWR { + out |= ssh_FXF_READ | ssh_FXF_WRITE + } + if f&os.O_APPEND == os.O_APPEND { + out |= ssh_FXF_APPEND + } + if f&os.O_CREATE == os.O_CREATE { + out |= ssh_FXF_CREAT + } + if f&os.O_TRUNC == os.O_TRUNC { + out |= ssh_FXF_TRUNC + } + if f&os.O_EXCL == os.O_EXCL { + out |= ssh_FXF_EXCL + } + return out +} diff --git a/vendor/github.com/pkg/sftp/conn.go b/vendor/github.com/pkg/sftp/conn.go new file mode 100644 index 00000000..f799715e --- /dev/null +++ b/vendor/github.com/pkg/sftp/conn.go @@ -0,0 +1,133 @@ +package sftp + +import ( + "encoding" + "io" + "sync" + + "github.com/pkg/errors" +) + +// conn implements a bidirectional channel on which client and server +// connections are multiplexed. 
+type conn struct { + io.Reader + io.WriteCloser + sync.Mutex // used to serialise writes to sendPacket + // sendPacketTest is needed to replicate packet issues in testing + sendPacketTest func(w io.Writer, m encoding.BinaryMarshaler) error +} + +func (c *conn) recvPacket() (uint8, []byte, error) { + return recvPacket(c) +} + +func (c *conn) sendPacket(m encoding.BinaryMarshaler) error { + c.Lock() + defer c.Unlock() + if c.sendPacketTest != nil { + return c.sendPacketTest(c, m) + } + return sendPacket(c, m) +} + +type clientConn struct { + conn + wg sync.WaitGroup + sync.Mutex // protects inflight + inflight map[uint32]chan<- result // outstanding requests +} + +// Close closes the SFTP session. +func (c *clientConn) Close() error { + defer c.wg.Wait() + return c.conn.Close() +} + +func (c *clientConn) loop() { + defer c.wg.Done() + err := c.recv() + if err != nil { + c.broadcastErr(err) + } +} + +// recv continuously reads from the server and forwards responses to the +// appropriate channel. +func (c *clientConn) recv() error { + defer func() { + c.conn.Lock() + c.conn.Close() + c.conn.Unlock() + }() + for { + typ, data, err := c.recvPacket() + if err != nil { + return err + } + sid, _ := unmarshalUint32(data) + c.Lock() + ch, ok := c.inflight[sid] + delete(c.inflight, sid) + c.Unlock() + if !ok { + // This is an unexpected occurrence. Send the error + // back to all listeners so that they terminate + // gracefully. + return errors.Errorf("sid: %v not fond", sid) + } + ch <- result{typ: typ, data: data} + } +} + +// result captures the result of receiving the a packet from the server +type result struct { + typ byte + data []byte + err error +} + +type idmarshaler interface { + id() uint32 + encoding.BinaryMarshaler +} + +func (c *clientConn) sendPacket(p idmarshaler) (byte, []byte, error) { + ch := make(chan result, 2) + c.dispatchRequest(ch, p) + s := <-ch + return s.typ, s.data, s.err +} + +func (c *clientConn) dispatchRequest(ch chan<- result, p idmarshaler) { + c.Lock() + c.inflight[p.id()] = ch + c.Unlock() + if err := c.conn.sendPacket(p); err != nil { + c.Lock() + delete(c.inflight, p.id()) + c.Unlock() + ch <- result{err: err} + } +} + +// broadcastErr sends an error to all goroutines waiting for a response. +func (c *clientConn) broadcastErr(err error) { + c.Lock() + listeners := make([]chan<- result, 0, len(c.inflight)) + for _, ch := range c.inflight { + listeners = append(listeners, ch) + } + c.Unlock() + for _, ch := range listeners { + ch <- result{err: err} + } +} + +type serverConn struct { + conn +} + +func (s *serverConn) sendError(p ider, err error) error { + return s.sendPacket(statusFromError(p, err)) +} diff --git a/vendor/github.com/pkg/sftp/debug.go b/vendor/github.com/pkg/sftp/debug.go new file mode 100644 index 00000000..3e264abe --- /dev/null +++ b/vendor/github.com/pkg/sftp/debug.go @@ -0,0 +1,9 @@ +// +build debug + +package sftp + +import "log" + +func debug(fmt string, args ...interface{}) { + log.Printf(fmt, args...) +} diff --git a/vendor/github.com/pkg/sftp/examples/buffered-read-benchmark/main.go b/vendor/github.com/pkg/sftp/examples/buffered-read-benchmark/main.go new file mode 100644 index 00000000..36ac6d72 --- /dev/null +++ b/vendor/github.com/pkg/sftp/examples/buffered-read-benchmark/main.go @@ -0,0 +1,78 @@ +// buffered-read-benchmark benchmarks the peformance of reading +// from /dev/zero on the server to a []byte on the client via io.Copy. 
+package main + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "time" + + "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/agent" + + "github.com/pkg/sftp" +) + +var ( + USER = flag.String("user", os.Getenv("USER"), "ssh username") + HOST = flag.String("host", "localhost", "ssh server hostname") + PORT = flag.Int("port", 22, "ssh server port") + PASS = flag.String("pass", os.Getenv("SOCKSIE_SSH_PASSWORD"), "ssh password") + SIZE = flag.Int("s", 1<<15, "set max packet size") +) + +func init() { + flag.Parse() +} + +func main() { + var auths []ssh.AuthMethod + if aconn, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK")); err == nil { + auths = append(auths, ssh.PublicKeysCallback(agent.NewClient(aconn).Signers)) + + } + if *PASS != "" { + auths = append(auths, ssh.Password(*PASS)) + } + + config := ssh.ClientConfig{ + User: *USER, + Auth: auths, + HostKeyCallback: ssh.InsecureIgnoreHostKey(), + } + addr := fmt.Sprintf("%s:%d", *HOST, *PORT) + conn, err := ssh.Dial("tcp", addr, &config) + if err != nil { + log.Fatalf("unable to connect to [%s]: %v", addr, err) + } + defer conn.Close() + + c, err := sftp.NewClient(conn, sftp.MaxPacket(*SIZE)) + if err != nil { + log.Fatalf("unable to start sftp subsytem: %v", err) + } + defer c.Close() + + r, err := c.Open("/dev/zero") + if err != nil { + log.Fatal(err) + } + defer r.Close() + + const size = 1e9 + + log.Printf("reading %v bytes", size) + t1 := time.Now() + n, err := io.ReadFull(r, make([]byte, size)) + if err != nil { + log.Fatal(err) + } + if n != size { + log.Fatalf("copy: expected %v bytes, got %d", size, n) + } + log.Printf("read %v bytes in %s", size, time.Since(t1)) +} diff --git a/vendor/github.com/pkg/sftp/examples/buffered-write-benchmark/main.go b/vendor/github.com/pkg/sftp/examples/buffered-write-benchmark/main.go new file mode 100644 index 00000000..d1babedb --- /dev/null +++ b/vendor/github.com/pkg/sftp/examples/buffered-write-benchmark/main.go @@ -0,0 +1,84 @@ +// buffered-write-benchmark benchmarks the peformance of writing +// a single large []byte on the client to /dev/null on the server via io.Copy. 
+package main + +import ( + "flag" + "fmt" + "log" + "net" + "os" + "syscall" + "time" + + "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/agent" + + "github.com/pkg/sftp" +) + +var ( + USER = flag.String("user", os.Getenv("USER"), "ssh username") + HOST = flag.String("host", "localhost", "ssh server hostname") + PORT = flag.Int("port", 22, "ssh server port") + PASS = flag.String("pass", os.Getenv("SOCKSIE_SSH_PASSWORD"), "ssh password") + SIZE = flag.Int("s", 1<<15, "set max packet size") +) + +func init() { + flag.Parse() +} + +func main() { + var auths []ssh.AuthMethod + if aconn, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK")); err == nil { + auths = append(auths, ssh.PublicKeysCallback(agent.NewClient(aconn).Signers)) + + } + if *PASS != "" { + auths = append(auths, ssh.Password(*PASS)) + } + + config := ssh.ClientConfig{ + User: *USER, + Auth: auths, + HostKeyCallback: ssh.InsecureIgnoreHostKey(), + } + addr := fmt.Sprintf("%s:%d", *HOST, *PORT) + conn, err := ssh.Dial("tcp", addr, &config) + if err != nil { + log.Fatalf("unable to connect to [%s]: %v", addr, err) + } + defer conn.Close() + + c, err := sftp.NewClient(conn, sftp.MaxPacket(*SIZE)) + if err != nil { + log.Fatalf("unable to start sftp subsytem: %v", err) + } + defer c.Close() + + w, err := c.OpenFile("/dev/null", syscall.O_WRONLY) + if err != nil { + log.Fatal(err) + } + defer w.Close() + + f, err := os.Open("/dev/zero") + if err != nil { + log.Fatal(err) + } + defer f.Close() + + const size = 1e9 + + log.Printf("writing %v bytes", size) + t1 := time.Now() + n, err := w.Write(make([]byte, size)) + if err != nil { + log.Fatal(err) + } + if n != size { + log.Fatalf("copy: expected %v bytes, got %d", size, n) + } + log.Printf("wrote %v bytes in %s", size, time.Since(t1)) +} diff --git a/vendor/github.com/pkg/sftp/examples/request-server/main.go b/vendor/github.com/pkg/sftp/examples/request-server/main.go new file mode 100644 index 00000000..fd21b43e --- /dev/null +++ b/vendor/github.com/pkg/sftp/examples/request-server/main.go @@ -0,0 +1,131 @@ +// An example SFTP server implementation using the golang SSH package. +// Serves the whole filesystem visible to the user, and has a hard-coded username and password, +// so not for real use! +package main + +import ( + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "os" + + "github.com/pkg/sftp" + "golang.org/x/crypto/ssh" +) + +// Based on example server code from golang.org/x/crypto/ssh and server_standalone +func main() { + + var ( + readOnly bool + debugStderr bool + ) + + flag.BoolVar(&readOnly, "R", false, "read-only server") + flag.BoolVar(&debugStderr, "e", false, "debug to stderr") + flag.Parse() + + debugStream := ioutil.Discard + if debugStderr { + debugStream = os.Stderr + } + + // An SSH server is represented by a ServerConfig, which holds + // certificate details and handles authentication of ServerConns. + config := &ssh.ServerConfig{ + PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) { + // Should use constant-time compare (or better, salt+hash) in + // a production setting. 
+ fmt.Fprintf(debugStream, "Login: %s\n", c.User()) + if c.User() == "testuser" && string(pass) == "tiger" { + return nil, nil + } + return nil, fmt.Errorf("password rejected for %q", c.User()) + }, + } + + privateBytes, err := ioutil.ReadFile("id_rsa") + if err != nil { + log.Fatal("Failed to load private key", err) + } + + private, err := ssh.ParsePrivateKey(privateBytes) + if err != nil { + log.Fatal("Failed to parse private key", err) + } + + config.AddHostKey(private) + + // Once a ServerConfig has been configured, connections can be + // accepted. + listener, err := net.Listen("tcp", "0.0.0.0:2022") + if err != nil { + log.Fatal("failed to listen for connection", err) + } + fmt.Printf("Listening on %v\n", listener.Addr()) + + nConn, err := listener.Accept() + if err != nil { + log.Fatal("failed to accept incoming connection", err) + } + + // Before use, a handshake must be performed on the incoming net.Conn. + sconn, chans, reqs, err := ssh.NewServerConn(nConn, config) + if err != nil { + log.Fatal("failed to handshake", err) + } + log.Println("login detected:", sconn.User()) + fmt.Fprintf(debugStream, "SSH server established\n") + + // The incoming Request channel must be serviced. + go ssh.DiscardRequests(reqs) + + // Service the incoming Channel channel. + for newChannel := range chans { + // Channels have a type, depending on the application level + // protocol intended. In the case of an SFTP session, this is "subsystem" + // with a payload string of "<length=4>sftp" + fmt.Fprintf(debugStream, "Incoming channel: %s\n", newChannel.ChannelType()) + if newChannel.ChannelType() != "session" { + newChannel.Reject(ssh.UnknownChannelType, "unknown channel type") + fmt.Fprintf(debugStream, "Unknown channel type: %s\n", newChannel.ChannelType()) + continue + } + channel, requests, err := newChannel.Accept() + if err != nil { + log.Fatal("could not accept channel.", err) + } + fmt.Fprintf(debugStream, "Channel accepted\n") + + // Sessions have out-of-band requests such as "shell", + // "pty-req" and "env". Here we handle only the + // "subsystem" request. + go func(in <-chan *ssh.Request) { + for req := range in { + fmt.Fprintf(debugStream, "Request: %v\n", req.Type) + ok := false + switch req.Type { + case "subsystem": + fmt.Fprintf(debugStream, "Subsystem: %s\n", req.Payload[4:]) + if string(req.Payload[4:]) == "sftp" { + ok = true + } + } + fmt.Fprintf(debugStream, " - accepted: %v\n", ok) + req.Reply(ok, nil) + } + }(requests) + + root := sftp.InMemHandler() + server := sftp.NewRequestServer(channel, root) + if err := server.Serve(); err == io.EOF { + server.Close() + log.Print("sftp client exited session.") + } else if err != nil { + log.Fatal("sftp server completed with error:", err) + } + } +} diff --git a/vendor/github.com/pkg/sftp/examples/sftp-server/main.go b/vendor/github.com/pkg/sftp/examples/sftp-server/main.go new file mode 100644 index 00000000..48e0e868 --- /dev/null +++ b/vendor/github.com/pkg/sftp/examples/sftp-server/main.go @@ -0,0 +1,147 @@ +// An example SFTP server implementation using the golang SSH package. +// Serves the whole filesystem visible to the user, and has a hard-coded username and password, +// so not for real use! 
+package main + +import ( + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "os" + + "github.com/pkg/sftp" + "golang.org/x/crypto/ssh" +) + +// Based on example server code from golang.org/x/crypto/ssh and server_standalone +func main() { + + var ( + readOnly bool + debugStderr bool + ) + + flag.BoolVar(&readOnly, "R", false, "read-only server") + flag.BoolVar(&debugStderr, "e", false, "debug to stderr") + flag.Parse() + + debugStream := ioutil.Discard + if debugStderr { + debugStream = os.Stderr + } + + // An SSH server is represented by a ServerConfig, which holds + // certificate details and handles authentication of ServerConns. + config := &ssh.ServerConfig{ + PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) { + // Should use constant-time compare (or better, salt+hash) in + // a production setting. + fmt.Fprintf(debugStream, "Login: %s\n", c.User()) + if c.User() == "testuser" && string(pass) == "tiger" { + return nil, nil + } + return nil, fmt.Errorf("password rejected for %q", c.User()) + }, + } + + privateBytes, err := ioutil.ReadFile("id_rsa") + if err != nil { + log.Fatal("Failed to load private key", err) + } + + private, err := ssh.ParsePrivateKey(privateBytes) + if err != nil { + log.Fatal("Failed to parse private key", err) + } + + config.AddHostKey(private) + + // Once a ServerConfig has been configured, connections can be + // accepted. + listener, err := net.Listen("tcp", "0.0.0.0:2022") + if err != nil { + log.Fatal("failed to listen for connection", err) + } + fmt.Printf("Listening on %v\n", listener.Addr()) + + nConn, err := listener.Accept() + if err != nil { + log.Fatal("failed to accept incoming connection", err) + } + + // Before use, a handshake must be performed on the incoming + // net.Conn. + _, chans, reqs, err := ssh.NewServerConn(nConn, config) + if err != nil { + log.Fatal("failed to handshake", err) + } + fmt.Fprintf(debugStream, "SSH server established\n") + + // The incoming Request channel must be serviced. + go ssh.DiscardRequests(reqs) + + // Service the incoming Channel channel. + for newChannel := range chans { + // Channels have a type, depending on the application level + // protocol intended. In the case of an SFTP session, this is "subsystem" + // with a payload string of "<length=4>sftp" + fmt.Fprintf(debugStream, "Incoming channel: %s\n", newChannel.ChannelType()) + if newChannel.ChannelType() != "session" { + newChannel.Reject(ssh.UnknownChannelType, "unknown channel type") + fmt.Fprintf(debugStream, "Unknown channel type: %s\n", newChannel.ChannelType()) + continue + } + channel, requests, err := newChannel.Accept() + if err != nil { + log.Fatal("could not accept channel.", err) + } + fmt.Fprintf(debugStream, "Channel accepted\n") + + // Sessions have out-of-band requests such as "shell", + // "pty-req" and "env". Here we handle only the + // "subsystem" request. 
+ go func(in <-chan *ssh.Request) { + for req := range in { + fmt.Fprintf(debugStream, "Request: %v\n", req.Type) + ok := false + switch req.Type { + case "subsystem": + fmt.Fprintf(debugStream, "Subsystem: %s\n", req.Payload[4:]) + if string(req.Payload[4:]) == "sftp" { + ok = true + } + } + fmt.Fprintf(debugStream, " - accepted: %v\n", ok) + req.Reply(ok, nil) + } + }(requests) + + serverOptions := []sftp.ServerOption{ + sftp.WithDebug(debugStream), + } + + if readOnly { + serverOptions = append(serverOptions, sftp.ReadOnly()) + fmt.Fprintf(debugStream, "Read-only server\n") + } else { + fmt.Fprintf(debugStream, "Read write server\n") + } + + server, err := sftp.NewServer( + channel, + serverOptions..., + ) + if err != nil { + log.Fatal(err) + } + if err := server.Serve(); err == io.EOF { + server.Close() + log.Print("sftp client exited session.") + } else if err != nil { + log.Fatal("sftp server completed with error:", err) + } + } +} diff --git a/vendor/github.com/pkg/sftp/examples/streaming-read-benchmark/main.go b/vendor/github.com/pkg/sftp/examples/streaming-read-benchmark/main.go new file mode 100644 index 00000000..87afc5a3 --- /dev/null +++ b/vendor/github.com/pkg/sftp/examples/streaming-read-benchmark/main.go @@ -0,0 +1,85 @@ +// streaming-read-benchmark benchmarks the peformance of reading +// from /dev/zero on the server to /dev/null on the client via io.Copy. +package main + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "syscall" + "time" + + "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/agent" + + "github.com/pkg/sftp" +) + +var ( + USER = flag.String("user", os.Getenv("USER"), "ssh username") + HOST = flag.String("host", "localhost", "ssh server hostname") + PORT = flag.Int("port", 22, "ssh server port") + PASS = flag.String("pass", os.Getenv("SOCKSIE_SSH_PASSWORD"), "ssh password") + SIZE = flag.Int("s", 1<<15, "set max packet size") +) + +func init() { + flag.Parse() +} + +func main() { + var auths []ssh.AuthMethod + if aconn, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK")); err == nil { + auths = append(auths, ssh.PublicKeysCallback(agent.NewClient(aconn).Signers)) + + } + if *PASS != "" { + auths = append(auths, ssh.Password(*PASS)) + } + + config := ssh.ClientConfig{ + User: *USER, + Auth: auths, + HostKeyCallback: ssh.InsecureIgnoreHostKey(), + } + addr := fmt.Sprintf("%s:%d", *HOST, *PORT) + conn, err := ssh.Dial("tcp", addr, &config) + if err != nil { + log.Fatalf("unable to connect to [%s]: %v", addr, err) + } + defer conn.Close() + + c, err := sftp.NewClient(conn, sftp.MaxPacket(*SIZE)) + if err != nil { + log.Fatalf("unable to start sftp subsytem: %v", err) + } + defer c.Close() + + r, err := c.Open("/dev/zero") + if err != nil { + log.Fatal(err) + } + defer r.Close() + + w, err := os.OpenFile("/dev/null", syscall.O_WRONLY, 0600) + if err != nil { + log.Fatal(err) + } + defer w.Close() + + const size int64 = 1e9 + + log.Printf("reading %v bytes", size) + t1 := time.Now() + n, err := io.Copy(w, io.LimitReader(r, size)) + if err != nil { + log.Fatal(err) + } + if n != size { + log.Fatalf("copy: expected %v bytes, got %d", size, n) + } + log.Printf("read %v bytes in %s", size, time.Since(t1)) +} diff --git a/vendor/github.com/pkg/sftp/examples/streaming-write-benchmark/main.go b/vendor/github.com/pkg/sftp/examples/streaming-write-benchmark/main.go new file mode 100644 index 00000000..8f432d39 --- /dev/null +++ b/vendor/github.com/pkg/sftp/examples/streaming-write-benchmark/main.go @@ -0,0 +1,85 @@ +// streaming-write-benchmark benchmarks the 
peformance of writing +// from /dev/zero on the client to /dev/null on the server via io.Copy. +package main + +import ( + "flag" + "fmt" + "io" + "log" + "net" + "os" + "syscall" + "time" + + "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/agent" + + "github.com/pkg/sftp" +) + +var ( + USER = flag.String("user", os.Getenv("USER"), "ssh username") + HOST = flag.String("host", "localhost", "ssh server hostname") + PORT = flag.Int("port", 22, "ssh server port") + PASS = flag.String("pass", os.Getenv("SOCKSIE_SSH_PASSWORD"), "ssh password") + SIZE = flag.Int("s", 1<<15, "set max packet size") +) + +func init() { + flag.Parse() +} + +func main() { + var auths []ssh.AuthMethod + if aconn, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK")); err == nil { + auths = append(auths, ssh.PublicKeysCallback(agent.NewClient(aconn).Signers)) + + } + if *PASS != "" { + auths = append(auths, ssh.Password(*PASS)) + } + + config := ssh.ClientConfig{ + User: *USER, + Auth: auths, + HostKeyCallback: ssh.InsecureIgnoreHostKey(), + } + addr := fmt.Sprintf("%s:%d", *HOST, *PORT) + conn, err := ssh.Dial("tcp", addr, &config) + if err != nil { + log.Fatalf("unable to connect to [%s]: %v", addr, err) + } + defer conn.Close() + + c, err := sftp.NewClient(conn, sftp.MaxPacket(*SIZE)) + if err != nil { + log.Fatalf("unable to start sftp subsytem: %v", err) + } + defer c.Close() + + w, err := c.OpenFile("/dev/null", syscall.O_WRONLY) + if err != nil { + log.Fatal(err) + } + defer w.Close() + + f, err := os.Open("/dev/zero") + if err != nil { + log.Fatal(err) + } + defer f.Close() + + const size int64 = 1e9 + + log.Printf("writing %v bytes", size) + t1 := time.Now() + n, err := io.Copy(w, io.LimitReader(f, size)) + if err != nil { + log.Fatal(err) + } + if n != size { + log.Fatalf("copy: expected %v bytes, got %d", size, n) + } + log.Printf("wrote %v bytes in %s", size, time.Since(t1)) +} diff --git a/vendor/github.com/pkg/sftp/match.go b/vendor/github.com/pkg/sftp/match.go new file mode 100644 index 00000000..e2f2ba40 --- /dev/null +++ b/vendor/github.com/pkg/sftp/match.go @@ -0,0 +1,295 @@ +package sftp + +import ( + "path" + "strings" + "unicode/utf8" +) + +// ErrBadPattern indicates a globbing pattern was malformed. +var ErrBadPattern = path.ErrBadPattern + +// Unix separator +const separator = "/" + +// Match reports whether name matches the shell file name pattern. +// The pattern syntax is: +// +// pattern: +// { term } +// term: +// '*' matches any sequence of non-Separator characters +// '?' matches any single non-Separator character +// '[' [ '^' ] { character-range } ']' +// character class (must be non-empty) +// c matches character c (c != '*', '?', '\\', '[') +// '\\' c matches character c +// +// character-range: +// c matches character c (c != '\\', '-', ']') +// '\\' c matches character c +// lo '-' hi matches character c for lo <= c <= hi +// +// Match requires pattern to match all of name, not just a substring. +// The only possible returned error is ErrBadPattern, when pattern +// is malformed. +// +// +func Match(pattern, name string) (matched bool, err error) { + return path.Match(pattern, name) +} + +// detect if byte(char) is path separator +func isPathSeparator(c byte) bool { + return string(c) == "/" +} + +// scanChunk gets the next segment of pattern, which is a non-star string +// possibly preceded by a star. 
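+// For illustration (editor's sketch, not part of the upstream source):
+// a pattern such as "*foo*bar" would be split as
+//
+//	star, chunk, rest := scanChunk("*foo*bar") // star == true, chunk == "foo", rest == "*bar"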
+func scanChunk(pattern string) (star bool, chunk, rest string) { + for len(pattern) > 0 && pattern[0] == '*' { + pattern = pattern[1:] + star = true + } + inrange := false + var i int +Scan: + for i = 0; i < len(pattern); i++ { + switch pattern[i] { + case '\\': + + // error check handled in matchChunk: bad pattern. + if i+1 < len(pattern) { + i++ + } + case '[': + inrange = true + case ']': + inrange = false + case '*': + if !inrange { + break Scan + } + } + } + return star, pattern[0:i], pattern[i:] +} + +// matchChunk checks whether chunk matches the beginning of s. +// If so, it returns the remainder of s (after the match). +// Chunk is all single-character operators: literals, char classes, and ?. +func matchChunk(chunk, s string) (rest string, ok bool, err error) { + for len(chunk) > 0 { + if len(s) == 0 { + return + } + switch chunk[0] { + case '[': + // character class + r, n := utf8.DecodeRuneInString(s) + s = s[n:] + chunk = chunk[1:] + // We can't end right after '[', we're expecting at least + // a closing bracket and possibly a caret. + if len(chunk) == 0 { + err = ErrBadPattern + return + } + // possibly negated + negated := chunk[0] == '^' + if negated { + chunk = chunk[1:] + } + // parse all ranges + match := false + nrange := 0 + for { + if len(chunk) > 0 && chunk[0] == ']' && nrange > 0 { + chunk = chunk[1:] + break + } + var lo, hi rune + if lo, chunk, err = getEsc(chunk); err != nil { + return + } + hi = lo + if chunk[0] == '-' { + if hi, chunk, err = getEsc(chunk[1:]); err != nil { + return + } + } + if lo <= r && r <= hi { + match = true + } + nrange++ + } + if match == negated { + return + } + + case '?': + if isPathSeparator(s[0]) { + return + } + _, n := utf8.DecodeRuneInString(s) + s = s[n:] + chunk = chunk[1:] + + case '\\': + chunk = chunk[1:] + if len(chunk) == 0 { + err = ErrBadPattern + return + } + fallthrough + + default: + if chunk[0] != s[0] { + return + } + s = s[1:] + chunk = chunk[1:] + } + } + return s, true, nil +} + +// getEsc gets a possibly-escaped character from chunk, for a character class. +func getEsc(chunk string) (r rune, nchunk string, err error) { + if len(chunk) == 0 || chunk[0] == '-' || chunk[0] == ']' { + err = ErrBadPattern + return + } + if chunk[0] == '\\' { + chunk = chunk[1:] + if len(chunk) == 0 { + err = ErrBadPattern + return + } + } + r, n := utf8.DecodeRuneInString(chunk) + if r == utf8.RuneError && n == 1 { + err = ErrBadPattern + } + nchunk = chunk[n:] + if len(nchunk) == 0 { + err = ErrBadPattern + } + return +} + +// Split splits path immediately following the final Separator, +// separating it into a directory and file name component. +// If there is no Separator in path, Split returns an empty dir +// and file set to path. +// The returned values have the property that path = dir+file. +func Split(path string) (dir, file string) { + i := len(path) - 1 + for i >= 0 && !isPathSeparator(path[i]) { + i-- + } + return path[:i+1], path[i+1:] +} + +// Glob returns the names of all files matching pattern or nil +// if there is no matching file. The syntax of patterns is the same +// as in Match. The pattern may describe hierarchical names such as +// /usr/*/bin/ed (assuming the Separator is '/'). +// +// Glob ignores file system errors such as I/O errors reading directories. +// The only possible returned error is ErrBadPattern, when pattern +// is malformed. 
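+// Editor's usage sketch (not upstream code; "c" is assumed to be an
+// *sftp.Client returned by NewClient):
+//
+//	matches, err := c.Glob("/var/log/*.log")
+//	if err != nil {
+//		// only ErrBadPattern can be returned here
+//	}
+//	for _, m := range matches {
+//		fmt.Println(m)
+//	}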
+func (c *Client) Glob(pattern string) (matches []string, err error) { + if !hasMeta(pattern) { + file, err := c.Lstat(pattern) + if err != nil { + return nil, nil + } + dir, _ := Split(pattern) + dir = cleanGlobPath(dir) + return []string{Join(dir, file.Name())}, nil + } + + dir, file := Split(pattern) + dir = cleanGlobPath(dir) + + if !hasMeta(dir) { + return c.glob(dir, file, nil) + } + + // Prevent infinite recursion. See issue 15879. + if dir == pattern { + return nil, ErrBadPattern + } + + var m []string + m, err = c.Glob(dir) + if err != nil { + return + } + for _, d := range m { + matches, err = c.glob(d, file, matches) + if err != nil { + return + } + } + return +} + +// cleanGlobPath prepares path for glob matching. +func cleanGlobPath(path string) string { + switch path { + case "": + return "." + case string(separator): + // do nothing to the path + return path + default: + return path[0 : len(path)-1] // chop off trailing separator + } +} + +// glob searches for files matching pattern in the directory dir +// and appends them to matches. If the directory cannot be +// opened, it returns the existing matches. New matches are +// added in lexicographical order. +func (c *Client) glob(dir, pattern string, matches []string) (m []string, e error) { + m = matches + fi, err := c.Stat(dir) + if err != nil { + return + } + if !fi.IsDir() { + return + } + names, err := c.ReadDir(dir) + if err != nil { + return + } + //sort.Strings(names) + + for _, n := range names { + matched, err := Match(pattern, n.Name()) + if err != nil { + return m, err + } + if matched { + m = append(m, Join(dir, n.Name())) + } + } + return +} + +// Join joins any number of path elements into a single path, adding +// a Separator if necessary. +// all empty strings are ignored. +func Join(elem ...string) string { + return path.Join(elem...) +} + +// hasMeta reports whether path contains any of the magic characters +// recognized by Match. +func hasMeta(path string) bool { + // TODO(niemeyer): Should other magic characters be added here? + return strings.ContainsAny(path, "*?[") +} diff --git a/vendor/github.com/pkg/sftp/packet-manager.go b/vendor/github.com/pkg/sftp/packet-manager.go new file mode 100644 index 00000000..bf822e67 --- /dev/null +++ b/vendor/github.com/pkg/sftp/packet-manager.go @@ -0,0 +1,173 @@ +package sftp + +import ( + "encoding" + "sort" + "sync" +) + +// The goal of the packetManager is to keep the outgoing packets in the same +// order as the incoming. This is due to some sftp clients requiring this +// behavior (eg. winscp). 
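+// Editor's illustration of the guarantee (an inference from the code below,
+// not upstream documentation): if requests arrive with ids 1, 2, 3 but their
+// responses become ready in the order 3, 1, 2, the manager holds response 3
+// back until 1 and 2 have been sent, so the client always sees replies in
+// request order.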
+ +type packetSender interface { + sendPacket(encoding.BinaryMarshaler) error +} + +type packetManager struct { + requests chan requestPacket + responses chan responsePacket + fini chan struct{} + incoming requestPacketIDs + outgoing responsePackets + sender packetSender // connection object + working *sync.WaitGroup +} + +func newPktMgr(sender packetSender) *packetManager { + s := &packetManager{ + requests: make(chan requestPacket, SftpServerWorkerCount), + responses: make(chan responsePacket, SftpServerWorkerCount), + fini: make(chan struct{}), + incoming: make([]uint32, 0, SftpServerWorkerCount), + outgoing: make([]responsePacket, 0, SftpServerWorkerCount), + sender: sender, + working: &sync.WaitGroup{}, + } + go s.controller() + return s +} + +type responsePackets []responsePacket + +func (r responsePackets) Sort() { + sort.Slice(r, func(i, j int) bool { + return r[i].id() < r[j].id() + }) +} + +type requestPacketIDs []uint32 + +func (r requestPacketIDs) Sort() { + sort.Slice(r, func(i, j int) bool { + return r[i] < r[j] + }) +} + +// register incoming packets to be handled +// send id of 0 for packets without id +func (s *packetManager) incomingPacket(pkt requestPacket) { + s.working.Add(1) + s.requests <- pkt // buffer == SftpServerWorkerCount +} + +// register outgoing packets as being ready +func (s *packetManager) readyPacket(pkt responsePacket) { + s.responses <- pkt + s.working.Done() +} + +// shut down packetManager controller +func (s *packetManager) close() { + // pause until current packets are processed + s.working.Wait() + close(s.fini) +} + +// Passed a worker function, returns a channel for incoming packets. +// The goal is to process packets in the order they are received as is +// requires by section 7 of the RFC, while maximizing throughput of file +// transfers. +func (s *packetManager) workerChan(runWorker func(requestChan)) requestChan { + + rwChan := make(chan requestPacket, SftpServerWorkerCount) + for i := 0; i < SftpServerWorkerCount; i++ { + runWorker(rwChan) + } + + cmdChan := make(chan requestPacket) + runWorker(cmdChan) + + pktChan := make(chan requestPacket, SftpServerWorkerCount) + go func() { + // start with cmdChan + curChan := cmdChan + for pkt := range pktChan { + // on file open packet, switch to rwChan + switch pkt.(type) { + case *sshFxpOpenPacket: + curChan = rwChan + // on file close packet, switch back to cmdChan + // after waiting for any reads/writes to finish + case *sshFxpClosePacket: + // wait for rwChan to finish + s.working.Wait() + // stop using rwChan + curChan = cmdChan + } + s.incomingPacket(pkt) + curChan <- pkt + } + close(rwChan) + close(cmdChan) + s.close() + }() + + return pktChan +} + +// process packets +func (s *packetManager) controller() { + for { + select { + case pkt := <-s.requests: + debug("incoming id: %v", pkt.id()) + s.incoming = append(s.incoming, pkt.id()) + if len(s.incoming) > 1 { + s.incoming.Sort() + } + case pkt := <-s.responses: + debug("outgoing pkt: %v", pkt.id()) + s.outgoing = append(s.outgoing, pkt) + if len(s.outgoing) > 1 { + s.outgoing.Sort() + } + case <-s.fini: + return + } + s.maybeSendPackets() + } +} + +// send as many packets as are ready +func (s *packetManager) maybeSendPackets() { + for { + if len(s.outgoing) == 0 || len(s.incoming) == 0 { + debug("break! 
-- outgoing: %v; incoming: %v", + len(s.outgoing), len(s.incoming)) + break + } + out := s.outgoing[0] + in := s.incoming[0] + // debug("incoming: %v", s.incoming) + // debug("outgoing: %v", outfilter(s.outgoing)) + if in == out.id() { + s.sender.sendPacket(out) + // pop off heads + copy(s.incoming, s.incoming[1:]) // shift left + s.incoming = s.incoming[:len(s.incoming)-1] // remove last + copy(s.outgoing, s.outgoing[1:]) // shift left + s.outgoing = s.outgoing[:len(s.outgoing)-1] // remove last + } else { + break + } + } +} + +//func outfilter(o []responsePacket) []uint32 { +// res := make([]uint32, 0, len(o)) +// for _, v := range o { +// res = append(res, v.id()) +// } +// return res +//} diff --git a/vendor/github.com/pkg/sftp/packet-typing.go b/vendor/github.com/pkg/sftp/packet-typing.go new file mode 100644 index 00000000..9001aea6 --- /dev/null +++ b/vendor/github.com/pkg/sftp/packet-typing.go @@ -0,0 +1,133 @@ +package sftp + +import ( + "encoding" + + "github.com/pkg/errors" +) + +// all incoming packets +type requestPacket interface { + encoding.BinaryUnmarshaler + id() uint32 +} + +type requestChan chan requestPacket + +type responsePacket interface { + encoding.BinaryMarshaler + id() uint32 +} + +// interfaces to group types +type hasPath interface { + requestPacket + getPath() string +} + +type hasHandle interface { + requestPacket + getHandle() string +} + +type notReadOnly interface { + notReadOnly() +} + +//// define types by adding methods +// hasPath +func (p sshFxpLstatPacket) getPath() string { return p.Path } +func (p sshFxpStatPacket) getPath() string { return p.Path } +func (p sshFxpRmdirPacket) getPath() string { return p.Path } +func (p sshFxpReadlinkPacket) getPath() string { return p.Path } +func (p sshFxpRealpathPacket) getPath() string { return p.Path } +func (p sshFxpMkdirPacket) getPath() string { return p.Path } +func (p sshFxpSetstatPacket) getPath() string { return p.Path } +func (p sshFxpStatvfsPacket) getPath() string { return p.Path } +func (p sshFxpRemovePacket) getPath() string { return p.Filename } +func (p sshFxpRenamePacket) getPath() string { return p.Oldpath } +func (p sshFxpSymlinkPacket) getPath() string { return p.Targetpath } +func (p sshFxpOpendirPacket) getPath() string { return p.Path } +func (p sshFxpOpenPacket) getPath() string { return p.Path } + +func (p sshFxpExtendedPacketPosixRename) getPath() string { return p.Oldpath } + +// hasHandle +func (p sshFxpFstatPacket) getHandle() string { return p.Handle } +func (p sshFxpFsetstatPacket) getHandle() string { return p.Handle } +func (p sshFxpReadPacket) getHandle() string { return p.Handle } +func (p sshFxpWritePacket) getHandle() string { return p.Handle } +func (p sshFxpReaddirPacket) getHandle() string { return p.Handle } +func (p sshFxpClosePacket) getHandle() string { return p.Handle } + +// notReadOnly +func (p sshFxpWritePacket) notReadOnly() {} +func (p sshFxpSetstatPacket) notReadOnly() {} +func (p sshFxpFsetstatPacket) notReadOnly() {} +func (p sshFxpRemovePacket) notReadOnly() {} +func (p sshFxpMkdirPacket) notReadOnly() {} +func (p sshFxpRmdirPacket) notReadOnly() {} +func (p sshFxpRenamePacket) notReadOnly() {} +func (p sshFxpSymlinkPacket) notReadOnly() {} +func (p sshFxpExtendedPacketPosixRename) notReadOnly() {} + +// some packets with ID are missing id() +func (p sshFxpDataPacket) id() uint32 { return p.ID } +func (p sshFxpStatusPacket) id() uint32 { return p.ID } +func (p sshFxpStatResponse) id() uint32 { return p.ID } +func (p sshFxpNamePacket) id() uint32 { return 
p.ID } +func (p sshFxpHandlePacket) id() uint32 { return p.ID } +func (p sshFxVersionPacket) id() uint32 { return 0 } + +// take raw incoming packet data and build packet objects +func makePacket(p rxPacket) (requestPacket, error) { + var pkt requestPacket + switch p.pktType { + case ssh_FXP_INIT: + pkt = &sshFxInitPacket{} + case ssh_FXP_LSTAT: + pkt = &sshFxpLstatPacket{} + case ssh_FXP_OPEN: + pkt = &sshFxpOpenPacket{} + case ssh_FXP_CLOSE: + pkt = &sshFxpClosePacket{} + case ssh_FXP_READ: + pkt = &sshFxpReadPacket{} + case ssh_FXP_WRITE: + pkt = &sshFxpWritePacket{} + case ssh_FXP_FSTAT: + pkt = &sshFxpFstatPacket{} + case ssh_FXP_SETSTAT: + pkt = &sshFxpSetstatPacket{} + case ssh_FXP_FSETSTAT: + pkt = &sshFxpFsetstatPacket{} + case ssh_FXP_OPENDIR: + pkt = &sshFxpOpendirPacket{} + case ssh_FXP_READDIR: + pkt = &sshFxpReaddirPacket{} + case ssh_FXP_REMOVE: + pkt = &sshFxpRemovePacket{} + case ssh_FXP_MKDIR: + pkt = &sshFxpMkdirPacket{} + case ssh_FXP_RMDIR: + pkt = &sshFxpRmdirPacket{} + case ssh_FXP_REALPATH: + pkt = &sshFxpRealpathPacket{} + case ssh_FXP_STAT: + pkt = &sshFxpStatPacket{} + case ssh_FXP_RENAME: + pkt = &sshFxpRenamePacket{} + case ssh_FXP_READLINK: + pkt = &sshFxpReadlinkPacket{} + case ssh_FXP_SYMLINK: + pkt = &sshFxpSymlinkPacket{} + case ssh_FXP_EXTENDED: + pkt = &sshFxpExtendedPacket{} + default: + return nil, errors.Errorf("unhandled packet type: %s", p.pktType) + } + if err := pkt.UnmarshalBinary(p.pktBytes); err != nil { + return nil, err + } + return pkt, nil +} diff --git a/vendor/github.com/pkg/sftp/packet.go b/vendor/github.com/pkg/sftp/packet.go new file mode 100644 index 00000000..711ca50c --- /dev/null +++ b/vendor/github.com/pkg/sftp/packet.go @@ -0,0 +1,952 @@ +package sftp + +import ( + "bytes" + "encoding" + "encoding/binary" + "fmt" + "io" + "os" + "reflect" + + "github.com/pkg/errors" +) + +var ( + errShortPacket = errors.New("packet too short") + errUnknownExtendedPacket = errors.New("unknown extended packet") +) + +const ( + debugDumpTxPacket = false + debugDumpRxPacket = false + debugDumpTxPacketBytes = false + debugDumpRxPacketBytes = false +) + +func marshalUint32(b []byte, v uint32) []byte { + return append(b, byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +func marshalUint64(b []byte, v uint64) []byte { + return marshalUint32(marshalUint32(b, uint32(v>>32)), uint32(v)) +} + +func marshalString(b []byte, v string) []byte { + return append(marshalUint32(b, uint32(len(v))), v...) 
+} + +func marshal(b []byte, v interface{}) []byte { + if v == nil { + return b + } + switch v := v.(type) { + case uint8: + return append(b, v) + case uint32: + return marshalUint32(b, v) + case uint64: + return marshalUint64(b, v) + case string: + return marshalString(b, v) + case os.FileInfo: + return marshalFileInfo(b, v) + default: + switch d := reflect.ValueOf(v); d.Kind() { + case reflect.Struct: + for i, n := 0, d.NumField(); i < n; i++ { + b = append(marshal(b, d.Field(i).Interface())) + } + return b + case reflect.Slice: + for i, n := 0, d.Len(); i < n; i++ { + b = append(marshal(b, d.Index(i).Interface())) + } + return b + default: + panic(fmt.Sprintf("marshal(%#v): cannot handle type %T", v, v)) + } + } +} + +func unmarshalUint32(b []byte) (uint32, []byte) { + v := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 + return v, b[4:] +} + +func unmarshalUint32Safe(b []byte) (uint32, []byte, error) { + var v uint32 + if len(b) < 4 { + return 0, nil, errShortPacket + } + v, b = unmarshalUint32(b) + return v, b, nil +} + +func unmarshalUint64(b []byte) (uint64, []byte) { + h, b := unmarshalUint32(b) + l, b := unmarshalUint32(b) + return uint64(h)<<32 | uint64(l), b +} + +func unmarshalUint64Safe(b []byte) (uint64, []byte, error) { + var v uint64 + if len(b) < 8 { + return 0, nil, errShortPacket + } + v, b = unmarshalUint64(b) + return v, b, nil +} + +func unmarshalString(b []byte) (string, []byte) { + n, b := unmarshalUint32(b) + return string(b[:n]), b[n:] +} + +func unmarshalStringSafe(b []byte) (string, []byte, error) { + n, b, err := unmarshalUint32Safe(b) + if err != nil { + return "", nil, err + } + if int64(n) > int64(len(b)) { + return "", nil, errShortPacket + } + return string(b[:n]), b[n:], nil +} + +// sendPacket marshals p according to RFC 4234. +func sendPacket(w io.Writer, m encoding.BinaryMarshaler) error { + bb, err := m.MarshalBinary() + if err != nil { + return errors.Errorf("binary marshaller failed: %v", err) + } + if debugDumpTxPacketBytes { + debug("send packet: %s %d bytes %x", fxp(bb[0]), len(bb), bb[1:]) + } else if debugDumpTxPacket { + debug("send packet: %s %d bytes", fxp(bb[0]), len(bb)) + } + l := uint32(len(bb)) + hdr := []byte{byte(l >> 24), byte(l >> 16), byte(l >> 8), byte(l)} + _, err = w.Write(hdr) + if err != nil { + return errors.Errorf("failed to send packet header: %v", err) + } + _, err = w.Write(bb) + if err != nil { + return errors.Errorf("failed to send packet body: %v", err) + } + return nil +} + +func recvPacket(r io.Reader) (uint8, []byte, error) { + var b = []byte{0, 0, 0, 0} + if _, err := io.ReadFull(r, b); err != nil { + return 0, nil, err + } + l, _ := unmarshalUint32(b) + b = make([]byte, l) + if _, err := io.ReadFull(r, b); err != nil { + debug("recv packet %d bytes: err %v", l, err) + return 0, nil, err + } + if debugDumpRxPacketBytes { + debug("recv packet: %s %d bytes %x", fxp(b[0]), l, b[1:]) + } else if debugDumpRxPacket { + debug("recv packet: %s %d bytes", fxp(b[0]), l) + } + return b[0], b[1:], nil +} + +type extensionPair struct { + Name string + Data string +} + +func unmarshalExtensionPair(b []byte) (extensionPair, []byte, error) { + var ep extensionPair + var err error + ep.Name, b, err = unmarshalStringSafe(b) + if err != nil { + return ep, b, err + } + ep.Data, b, err = unmarshalStringSafe(b) + return ep, b, err +} + +// Here starts the definition of packets along with their MarshalBinary +// implementations. +// Manually writing the marshalling logic wins us a lot of time and +// allocation. 
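+// Editor's sketch of the wire framing produced by sendPacket above (an
+// inference from the code, not upstream documentation): each packet is
+// written as
+//
+//	uint32 length | byte packet-type | payload
+//
+// where length counts the type byte plus the payload, but not itself.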
+ +type sshFxInitPacket struct { + Version uint32 + Extensions []extensionPair +} + +func (p sshFxInitPacket) MarshalBinary() ([]byte, error) { + l := 1 + 4 // byte + uint32 + for _, e := range p.Extensions { + l += 4 + len(e.Name) + 4 + len(e.Data) + } + + b := make([]byte, 0, l) + b = append(b, ssh_FXP_INIT) + b = marshalUint32(b, p.Version) + for _, e := range p.Extensions { + b = marshalString(b, e.Name) + b = marshalString(b, e.Data) + } + return b, nil +} + +func (p *sshFxInitPacket) UnmarshalBinary(b []byte) error { + var err error + if p.Version, b, err = unmarshalUint32Safe(b); err != nil { + return err + } + for len(b) > 0 { + var ep extensionPair + ep, b, err = unmarshalExtensionPair(b) + if err != nil { + return err + } + p.Extensions = append(p.Extensions, ep) + } + return nil +} + +type sshFxVersionPacket struct { + Version uint32 + Extensions []struct { + Name, Data string + } +} + +func (p sshFxVersionPacket) MarshalBinary() ([]byte, error) { + l := 1 + 4 // byte + uint32 + for _, e := range p.Extensions { + l += 4 + len(e.Name) + 4 + len(e.Data) + } + + b := make([]byte, 0, l) + b = append(b, ssh_FXP_VERSION) + b = marshalUint32(b, p.Version) + for _, e := range p.Extensions { + b = marshalString(b, e.Name) + b = marshalString(b, e.Data) + } + return b, nil +} + +func marshalIDString(packetType byte, id uint32, str string) ([]byte, error) { + l := 1 + 4 + // type(byte) + uint32 + 4 + len(str) + + b := make([]byte, 0, l) + b = append(b, packetType) + b = marshalUint32(b, id) + b = marshalString(b, str) + return b, nil +} + +func unmarshalIDString(b []byte, id *uint32, str *string) error { + var err error + *id, b, err = unmarshalUint32Safe(b) + if err != nil { + return err + } + *str, _, err = unmarshalStringSafe(b) + return err +} + +type sshFxpReaddirPacket struct { + ID uint32 + Handle string +} + +func (p sshFxpReaddirPacket) id() uint32 { return p.ID } + +func (p sshFxpReaddirPacket) MarshalBinary() ([]byte, error) { + return marshalIDString(ssh_FXP_READDIR, p.ID, p.Handle) +} + +func (p *sshFxpReaddirPacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Handle) +} + +type sshFxpOpendirPacket struct { + ID uint32 + Path string +} + +func (p sshFxpOpendirPacket) id() uint32 { return p.ID } + +func (p sshFxpOpendirPacket) MarshalBinary() ([]byte, error) { + return marshalIDString(ssh_FXP_OPENDIR, p.ID, p.Path) +} + +func (p *sshFxpOpendirPacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Path) +} + +type sshFxpLstatPacket struct { + ID uint32 + Path string +} + +func (p sshFxpLstatPacket) id() uint32 { return p.ID } + +func (p sshFxpLstatPacket) MarshalBinary() ([]byte, error) { + return marshalIDString(ssh_FXP_LSTAT, p.ID, p.Path) +} + +func (p *sshFxpLstatPacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Path) +} + +type sshFxpStatPacket struct { + ID uint32 + Path string +} + +func (p sshFxpStatPacket) id() uint32 { return p.ID } + +func (p sshFxpStatPacket) MarshalBinary() ([]byte, error) { + return marshalIDString(ssh_FXP_STAT, p.ID, p.Path) +} + +func (p *sshFxpStatPacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Path) +} + +type sshFxpFstatPacket struct { + ID uint32 + Handle string +} + +func (p sshFxpFstatPacket) id() uint32 { return p.ID } + +func (p sshFxpFstatPacket) MarshalBinary() ([]byte, error) { + return marshalIDString(ssh_FXP_FSTAT, p.ID, p.Handle) +} + +func (p *sshFxpFstatPacket) UnmarshalBinary(b []byte) error { + return 
unmarshalIDString(b, &p.ID, &p.Handle) +} + +type sshFxpClosePacket struct { + ID uint32 + Handle string +} + +func (p sshFxpClosePacket) id() uint32 { return p.ID } + +func (p sshFxpClosePacket) MarshalBinary() ([]byte, error) { + return marshalIDString(ssh_FXP_CLOSE, p.ID, p.Handle) +} + +func (p *sshFxpClosePacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Handle) +} + +type sshFxpRemovePacket struct { + ID uint32 + Filename string +} + +func (p sshFxpRemovePacket) id() uint32 { return p.ID } + +func (p sshFxpRemovePacket) MarshalBinary() ([]byte, error) { + return marshalIDString(ssh_FXP_REMOVE, p.ID, p.Filename) +} + +func (p *sshFxpRemovePacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Filename) +} + +type sshFxpRmdirPacket struct { + ID uint32 + Path string +} + +func (p sshFxpRmdirPacket) id() uint32 { return p.ID } + +func (p sshFxpRmdirPacket) MarshalBinary() ([]byte, error) { + return marshalIDString(ssh_FXP_RMDIR, p.ID, p.Path) +} + +func (p *sshFxpRmdirPacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Path) +} + +type sshFxpSymlinkPacket struct { + ID uint32 + Targetpath string + Linkpath string +} + +func (p sshFxpSymlinkPacket) id() uint32 { return p.ID } + +func (p sshFxpSymlinkPacket) MarshalBinary() ([]byte, error) { + l := 1 + 4 + // type(byte) + uint32 + 4 + len(p.Targetpath) + + 4 + len(p.Linkpath) + + b := make([]byte, 0, l) + b = append(b, ssh_FXP_SYMLINK) + b = marshalUint32(b, p.ID) + b = marshalString(b, p.Targetpath) + b = marshalString(b, p.Linkpath) + return b, nil +} + +func (p *sshFxpSymlinkPacket) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.Targetpath, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Linkpath, _, err = unmarshalStringSafe(b); err != nil { + return err + } + return nil +} + +type sshFxpReadlinkPacket struct { + ID uint32 + Path string +} + +func (p sshFxpReadlinkPacket) id() uint32 { return p.ID } + +func (p sshFxpReadlinkPacket) MarshalBinary() ([]byte, error) { + return marshalIDString(ssh_FXP_READLINK, p.ID, p.Path) +} + +func (p *sshFxpReadlinkPacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Path) +} + +type sshFxpRealpathPacket struct { + ID uint32 + Path string +} + +func (p sshFxpRealpathPacket) id() uint32 { return p.ID } + +func (p sshFxpRealpathPacket) MarshalBinary() ([]byte, error) { + return marshalIDString(ssh_FXP_REALPATH, p.ID, p.Path) +} + +func (p *sshFxpRealpathPacket) UnmarshalBinary(b []byte) error { + return unmarshalIDString(b, &p.ID, &p.Path) +} + +type sshFxpNameAttr struct { + Name string + LongName string + Attrs []interface{} +} + +func (p sshFxpNameAttr) MarshalBinary() ([]byte, error) { + b := []byte{} + b = marshalString(b, p.Name) + b = marshalString(b, p.LongName) + for _, attr := range p.Attrs { + b = marshal(b, attr) + } + return b, nil +} + +type sshFxpNamePacket struct { + ID uint32 + NameAttrs []sshFxpNameAttr +} + +func (p sshFxpNamePacket) MarshalBinary() ([]byte, error) { + b := []byte{} + b = append(b, ssh_FXP_NAME) + b = marshalUint32(b, p.ID) + b = marshalUint32(b, uint32(len(p.NameAttrs))) + for _, na := range p.NameAttrs { + ab, err := na.MarshalBinary() + if err != nil { + return nil, err + } + + b = append(b, ab...) 
+ } + return b, nil +} + +type sshFxpOpenPacket struct { + ID uint32 + Path string + Pflags uint32 + Flags uint32 // ignored +} + +func (p sshFxpOpenPacket) id() uint32 { return p.ID } + +func (p sshFxpOpenPacket) MarshalBinary() ([]byte, error) { + l := 1 + 4 + + 4 + len(p.Path) + + 4 + 4 + + b := make([]byte, 0, l) + b = append(b, ssh_FXP_OPEN) + b = marshalUint32(b, p.ID) + b = marshalString(b, p.Path) + b = marshalUint32(b, p.Pflags) + b = marshalUint32(b, p.Flags) + return b, nil +} + +func (p *sshFxpOpenPacket) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.Path, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Pflags, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.Flags, _, err = unmarshalUint32Safe(b); err != nil { + return err + } + return nil +} + +type sshFxpReadPacket struct { + ID uint32 + Handle string + Offset uint64 + Len uint32 +} + +func (p sshFxpReadPacket) id() uint32 { return p.ID } + +func (p sshFxpReadPacket) MarshalBinary() ([]byte, error) { + l := 1 + 4 + // type(byte) + uint32 + 4 + len(p.Handle) + + 8 + 4 // uint64 + uint32 + + b := make([]byte, 0, l) + b = append(b, ssh_FXP_READ) + b = marshalUint32(b, p.ID) + b = marshalString(b, p.Handle) + b = marshalUint64(b, p.Offset) + b = marshalUint32(b, p.Len) + return b, nil +} + +func (p *sshFxpReadPacket) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.Handle, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Offset, b, err = unmarshalUint64Safe(b); err != nil { + return err + } else if p.Len, _, err = unmarshalUint32Safe(b); err != nil { + return err + } + return nil +} + +type sshFxpRenamePacket struct { + ID uint32 + Oldpath string + Newpath string +} + +func (p sshFxpRenamePacket) id() uint32 { return p.ID } + +func (p sshFxpRenamePacket) MarshalBinary() ([]byte, error) { + l := 1 + 4 + // type(byte) + uint32 + 4 + len(p.Oldpath) + + 4 + len(p.Newpath) + + b := make([]byte, 0, l) + b = append(b, ssh_FXP_RENAME) + b = marshalUint32(b, p.ID) + b = marshalString(b, p.Oldpath) + b = marshalString(b, p.Newpath) + return b, nil +} + +func (p *sshFxpRenamePacket) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.Oldpath, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Newpath, _, err = unmarshalStringSafe(b); err != nil { + return err + } + return nil +} + +type sshFxpPosixRenamePacket struct { + ID uint32 + Oldpath string + Newpath string +} + +func (p sshFxpPosixRenamePacket) id() uint32 { return p.ID } + +func (p sshFxpPosixRenamePacket) MarshalBinary() ([]byte, error) { + const ext = "posix-rename@openssh.com" + l := 1 + 4 + // type(byte) + uint32 + 4 + len(ext) + + 4 + len(p.Oldpath) + + 4 + len(p.Newpath) + + b := make([]byte, 0, l) + b = append(b, ssh_FXP_EXTENDED) + b = marshalUint32(b, p.ID) + b = marshalString(b, ext) + b = marshalString(b, p.Oldpath) + b = marshalString(b, p.Newpath) + return b, nil +} + +type sshFxpWritePacket struct { + ID uint32 + Handle string + Offset uint64 + Length uint32 + Data []byte +} + +func (p sshFxpWritePacket) id() uint32 { return p.ID } + +func (p sshFxpWritePacket) MarshalBinary() ([]byte, error) { + l := 1 + 4 + // type(byte) + uint32 + 4 + len(p.Handle) + + 8 + 4 + // uint64 + uint32 + len(p.Data) + + b := make([]byte, 0, l) 
+ b = append(b, ssh_FXP_WRITE) + b = marshalUint32(b, p.ID) + b = marshalString(b, p.Handle) + b = marshalUint64(b, p.Offset) + b = marshalUint32(b, p.Length) + b = append(b, p.Data...) + return b, nil +} + +func (p *sshFxpWritePacket) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.Handle, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Offset, b, err = unmarshalUint64Safe(b); err != nil { + return err + } else if p.Length, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if uint32(len(b)) < p.Length { + return errShortPacket + } + + p.Data = append([]byte{}, b[:p.Length]...) + return nil +} + +type sshFxpMkdirPacket struct { + ID uint32 + Path string + Flags uint32 // ignored +} + +func (p sshFxpMkdirPacket) id() uint32 { return p.ID } + +func (p sshFxpMkdirPacket) MarshalBinary() ([]byte, error) { + l := 1 + 4 + // type(byte) + uint32 + 4 + len(p.Path) + + 4 // uint32 + + b := make([]byte, 0, l) + b = append(b, ssh_FXP_MKDIR) + b = marshalUint32(b, p.ID) + b = marshalString(b, p.Path) + b = marshalUint32(b, p.Flags) + return b, nil +} + +func (p *sshFxpMkdirPacket) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.Path, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Flags, _, err = unmarshalUint32Safe(b); err != nil { + return err + } + return nil +} + +type sshFxpSetstatPacket struct { + ID uint32 + Path string + Flags uint32 + Attrs interface{} +} + +type sshFxpFsetstatPacket struct { + ID uint32 + Handle string + Flags uint32 + Attrs interface{} +} + +func (p sshFxpSetstatPacket) id() uint32 { return p.ID } +func (p sshFxpFsetstatPacket) id() uint32 { return p.ID } + +func (p sshFxpSetstatPacket) MarshalBinary() ([]byte, error) { + l := 1 + 4 + // type(byte) + uint32 + 4 + len(p.Path) + + 4 // uint32 + uint64 + + b := make([]byte, 0, l) + b = append(b, ssh_FXP_SETSTAT) + b = marshalUint32(b, p.ID) + b = marshalString(b, p.Path) + b = marshalUint32(b, p.Flags) + b = marshal(b, p.Attrs) + return b, nil +} + +func (p sshFxpFsetstatPacket) MarshalBinary() ([]byte, error) { + l := 1 + 4 + // type(byte) + uint32 + 4 + len(p.Handle) + + 4 // uint32 + uint64 + + b := make([]byte, 0, l) + b = append(b, ssh_FXP_FSETSTAT) + b = marshalUint32(b, p.ID) + b = marshalString(b, p.Handle) + b = marshalUint32(b, p.Flags) + b = marshal(b, p.Attrs) + return b, nil +} + +func (p *sshFxpSetstatPacket) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.Path, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Flags, b, err = unmarshalUint32Safe(b); err != nil { + return err + } + p.Attrs = b + return nil +} + +func (p *sshFxpFsetstatPacket) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.Handle, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Flags, b, err = unmarshalUint32Safe(b); err != nil { + return err + } + p.Attrs = b + return nil +} + +type sshFxpHandlePacket struct { + ID uint32 + Handle string +} + +func (p sshFxpHandlePacket) MarshalBinary() ([]byte, error) { + b := []byte{ssh_FXP_HANDLE} + b = marshalUint32(b, p.ID) + b = marshalString(b, p.Handle) + return b, nil +} + +type sshFxpStatusPacket struct { + ID uint32 + StatusError +} + +func (p 
sshFxpStatusPacket) MarshalBinary() ([]byte, error) { + b := []byte{ssh_FXP_STATUS} + b = marshalUint32(b, p.ID) + b = marshalStatus(b, p.StatusError) + return b, nil +} + +type sshFxpDataPacket struct { + ID uint32 + Length uint32 + Data []byte +} + +func (p sshFxpDataPacket) MarshalBinary() ([]byte, error) { + b := []byte{ssh_FXP_DATA} + b = marshalUint32(b, p.ID) + b = marshalUint32(b, p.Length) + b = append(b, p.Data[:p.Length]...) + return b, nil +} + +func (p *sshFxpDataPacket) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.Length, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if uint32(len(b)) < p.Length { + return errors.New("truncated packet") + } + + p.Data = make([]byte, p.Length) + copy(p.Data, b) + return nil +} + +type sshFxpStatvfsPacket struct { + ID uint32 + Path string +} + +func (p sshFxpStatvfsPacket) id() uint32 { return p.ID } + +func (p sshFxpStatvfsPacket) MarshalBinary() ([]byte, error) { + l := 1 + 4 + // type(byte) + uint32 + len(p.Path) + + len("statvfs@openssh.com") + + b := make([]byte, 0, l) + b = append(b, ssh_FXP_EXTENDED) + b = marshalUint32(b, p.ID) + b = marshalString(b, "statvfs@openssh.com") + b = marshalString(b, p.Path) + return b, nil +} + +// A StatVFS contains statistics about a filesystem. +type StatVFS struct { + ID uint32 + Bsize uint64 /* file system block size */ + Frsize uint64 /* fundamental fs block size */ + Blocks uint64 /* number of blocks (unit f_frsize) */ + Bfree uint64 /* free blocks in file system */ + Bavail uint64 /* free blocks for non-root */ + Files uint64 /* total file inodes */ + Ffree uint64 /* free file inodes */ + Favail uint64 /* free file inodes for to non-root */ + Fsid uint64 /* file system id */ + Flag uint64 /* bit mask of f_flag values */ + Namemax uint64 /* maximum filename length */ +} + +// TotalSpace calculates the amount of total space in a filesystem. +func (p *StatVFS) TotalSpace() uint64 { + return p.Frsize * p.Blocks +} + +// FreeSpace calculates the amount of free space in a filesystem. 
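+// Editor's usage sketch (assumes a populated *StatVFS named vfs, e.g. the
+// reply to the statvfs@openssh.com extension handled below):
+//
+//	used := vfs.TotalSpace() - vfs.FreeSpace() // bytes currently in use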
+func (p *StatVFS) FreeSpace() uint64 { + return p.Frsize * p.Bfree +} + +// Convert to ssh_FXP_EXTENDED_REPLY packet binary format +func (p *StatVFS) MarshalBinary() ([]byte, error) { + var buf bytes.Buffer + buf.Write([]byte{ssh_FXP_EXTENDED_REPLY}) + err := binary.Write(&buf, binary.BigEndian, p) + return buf.Bytes(), err +} + +type sshFxpExtendedPacket struct { + ID uint32 + ExtendedRequest string + SpecificPacket interface { + serverRespondablePacket + readonly() bool + } +} + +func (p sshFxpExtendedPacket) id() uint32 { return p.ID } +func (p sshFxpExtendedPacket) readonly() bool { return p.SpecificPacket.readonly() } + +func (p sshFxpExtendedPacket) respond(svr *Server) error { + return p.SpecificPacket.respond(svr) +} + +func (p *sshFxpExtendedPacket) UnmarshalBinary(b []byte) error { + var err error + bOrig := b + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.ExtendedRequest, _, err = unmarshalStringSafe(b); err != nil { + return err + } + + // specific unmarshalling + switch p.ExtendedRequest { + case "statvfs@openssh.com": + p.SpecificPacket = &sshFxpExtendedPacketStatVFS{} + case "posix-rename@openssh.com": + p.SpecificPacket = &sshFxpExtendedPacketPosixRename{} + default: + return errUnknownExtendedPacket + } + + return p.SpecificPacket.UnmarshalBinary(bOrig) +} + +type sshFxpExtendedPacketStatVFS struct { + ID uint32 + ExtendedRequest string + Path string +} + +func (p sshFxpExtendedPacketStatVFS) id() uint32 { return p.ID } +func (p sshFxpExtendedPacketStatVFS) readonly() bool { return true } +func (p *sshFxpExtendedPacketStatVFS) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.ExtendedRequest, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Path, _, err = unmarshalStringSafe(b); err != nil { + return err + } + return nil +} + +type sshFxpExtendedPacketPosixRename struct { + ID uint32 + ExtendedRequest string + Oldpath string + Newpath string +} + +func (p sshFxpExtendedPacketPosixRename) id() uint32 { return p.ID } +func (p sshFxpExtendedPacketPosixRename) readonly() bool { return false } +func (p *sshFxpExtendedPacketPosixRename) UnmarshalBinary(b []byte) error { + var err error + if p.ID, b, err = unmarshalUint32Safe(b); err != nil { + return err + } else if p.ExtendedRequest, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Oldpath, b, err = unmarshalStringSafe(b); err != nil { + return err + } else if p.Newpath, _, err = unmarshalStringSafe(b); err != nil { + return err + } + return nil +} + +func (p sshFxpExtendedPacketPosixRename) respond(s *Server) error { + err := os.Rename(p.Oldpath, p.Newpath) + return s.sendError(p, err) +} diff --git a/vendor/github.com/pkg/sftp/release.go b/vendor/github.com/pkg/sftp/release.go new file mode 100644 index 00000000..b695528f --- /dev/null +++ b/vendor/github.com/pkg/sftp/release.go @@ -0,0 +1,5 @@ +// +build !debug + +package sftp + +func debug(fmt string, args ...interface{}) {} diff --git a/vendor/github.com/pkg/sftp/request-attrs.go b/vendor/github.com/pkg/sftp/request-attrs.go new file mode 100644 index 00000000..0149d930 --- /dev/null +++ b/vendor/github.com/pkg/sftp/request-attrs.go @@ -0,0 +1,63 @@ +package sftp + +// Methods on the Request object to make working with the Flags bitmasks and +// Attr(ibutes) byte blob easier. Use Pflags() when working with an Open/Write +// request and AttrFlags() and Attributes() when working with SetStat requests. 
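+// Editor's sketch of typical use inside a handler (not upstream code; "r"
+// is the incoming *Request):
+//
+//	if r.Pflags().Append {
+//		// the client opened the file for appending
+//	}
+//	mode := r.Attributes().FileMode() // meaningful for Setstat requests
+//	_ = mode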
+ +import "os" + +// Open packet pflags +type pflags struct { + Read, Write, Append, Creat, Trunc, Excl bool +} + +// testable constructor +func newPflags(flags uint32) pflags { + return pflags{ + Read: flags&ssh_FXF_READ != 0, + Write: flags&ssh_FXF_WRITE != 0, + Append: flags&ssh_FXF_APPEND != 0, + Creat: flags&ssh_FXF_CREAT != 0, + Trunc: flags&ssh_FXF_TRUNC != 0, + Excl: flags&ssh_FXF_EXCL != 0, + } +} + +// Check bitmap/uint32 for Open packet pflag values +func (r *Request) Pflags() pflags { + return newPflags(r.Flags) +} + +// File attribute flags +type aflags struct { + Size, UidGid, Permissions, Acmodtime bool +} + +// testable constructor +func newAflags(flags uint32) aflags { + return aflags{ + Size: (flags & ssh_FILEXFER_ATTR_SIZE) != 0, + UidGid: (flags & ssh_FILEXFER_ATTR_UIDGID) != 0, + Permissions: (flags & ssh_FILEXFER_ATTR_PERMISSIONS) != 0, + Acmodtime: (flags & ssh_FILEXFER_ATTR_ACMODTIME) != 0, + } +} + +// Check bitmap/uint32 for file attribute flags +func (r *Request) AttrFlags(flags uint32) aflags { + return newAflags(r.Flags) +} + +// File attributes +type fileattrs FileStat + +// Return Mode wrapped in os.FileMode +func (a fileattrs) FileMode() os.FileMode { + return os.FileMode(a.Mode) +} + +// Parse file attributes byte blob and return them in object +func (r *Request) Attributes() fileattrs { + fa, _ := getFileStat(r.Flags, r.Attrs) + return fileattrs(*fa) +} diff --git a/vendor/github.com/pkg/sftp/request-errors.go b/vendor/github.com/pkg/sftp/request-errors.go new file mode 100644 index 00000000..00451e74 --- /dev/null +++ b/vendor/github.com/pkg/sftp/request-errors.go @@ -0,0 +1,42 @@ +package sftp + +// Error types that match the SFTP's SSH_FXP_STATUS codes. Gives you more +// direct control of the errors being sent vs. letting the library work them +// out from the standard os/io errors. + +type fxerr uint32 + +const ( + ErrSshFxOk = fxerr(ssh_FX_OK) + ErrSshFxEof = fxerr(ssh_FX_EOF) + ErrSshFxNoSuchFile = fxerr(ssh_FX_NO_SUCH_FILE) + ErrSshFxPermissionDenied = fxerr(ssh_FX_PERMISSION_DENIED) + ErrSshFxFailure = fxerr(ssh_FX_FAILURE) + ErrSshFxBadMessage = fxerr(ssh_FX_BAD_MESSAGE) + ErrSshFxNoConnection = fxerr(ssh_FX_NO_CONNECTION) + ErrSshFxConnectionLost = fxerr(ssh_FX_CONNECTION_LOST) + ErrSshFxOpUnsupported = fxerr(ssh_FX_OP_UNSUPPORTED) +) + +func (e fxerr) Error() string { + switch e { + case ErrSshFxOk: + return "OK" + case ErrSshFxEof: + return "EOF" + case ErrSshFxNoSuchFile: + return "No Such File" + case ErrSshFxPermissionDenied: + return "Permission Denied" + case ErrSshFxBadMessage: + return "Bad Message" + case ErrSshFxNoConnection: + return "No Connection" + case ErrSshFxConnectionLost: + return "Connection Lost" + case ErrSshFxOpUnsupported: + return "Operation Unsupported" + default: + return "Failure" + } +} diff --git a/vendor/github.com/pkg/sftp/request-example.go b/vendor/github.com/pkg/sftp/request-example.go new file mode 100644 index 00000000..96191e0d --- /dev/null +++ b/vendor/github.com/pkg/sftp/request-example.go @@ -0,0 +1,261 @@ +package sftp + +// This serves as an example of how to implement the request server handler as +// well as a dummy backend for testing. It implements an in-memory backend that +// works as a very simple filesystem with simple flat key-value lookup system. 
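+// Editor's sketch of wiring this backend into a request server (not
+// upstream code; "channel" is an assumed ssh.Channel accepted for the
+// "sftp" subsystem, which satisfies io.ReadWriteCloser):
+//
+//	server := NewRequestServer(channel, InMemHandler())
+//	if err := server.Serve(); err == io.EOF {
+//		server.Close()
+//	}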
+ +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "sort" + "sync" + "time" +) + +// InMemHandler returns a Hanlders object with the test handlers +func InMemHandler() Handlers { + root := &root{ + files: make(map[string]*memFile), + } + root.memFile = newMemFile("/", true) + return Handlers{root, root, root, root} +} + +// Handlers +func (fs *root) Fileread(r *Request) (io.ReaderAt, error) { + if fs.mockErr != nil { + return nil, fs.mockErr + } + fs.filesLock.Lock() + defer fs.filesLock.Unlock() + file, err := fs.fetch(r.Filepath) + if err != nil { + return nil, err + } + if file.symlink != "" { + file, err = fs.fetch(file.symlink) + if err != nil { + return nil, err + } + } + return file.ReaderAt() +} + +func (fs *root) Filewrite(r *Request) (io.WriterAt, error) { + if fs.mockErr != nil { + return nil, fs.mockErr + } + fs.filesLock.Lock() + defer fs.filesLock.Unlock() + file, err := fs.fetch(r.Filepath) + if err == os.ErrNotExist { + dir, err := fs.fetch(filepath.Dir(r.Filepath)) + if err != nil { + return nil, err + } + if !dir.isdir { + return nil, os.ErrInvalid + } + file = newMemFile(r.Filepath, false) + fs.files[r.Filepath] = file + } + return file.WriterAt() +} + +func (fs *root) Filecmd(r *Request) error { + if fs.mockErr != nil { + return fs.mockErr + } + fs.filesLock.Lock() + defer fs.filesLock.Unlock() + switch r.Method { + case "Setstat": + return nil + case "Rename": + file, err := fs.fetch(r.Filepath) + if err != nil { + return err + } + if _, ok := fs.files[r.Target]; ok { + return &os.LinkError{Op: "rename", Old: r.Filepath, New: r.Target, + Err: fmt.Errorf("dest file exists")} + } + fs.files[r.Target] = file + delete(fs.files, r.Filepath) + case "Rmdir", "Remove": + _, err := fs.fetch(filepath.Dir(r.Filepath)) + if err != nil { + return err + } + delete(fs.files, r.Filepath) + case "Mkdir": + _, err := fs.fetch(filepath.Dir(r.Filepath)) + if err != nil { + return err + } + fs.files[r.Filepath] = newMemFile(r.Filepath, true) + case "Symlink": + _, err := fs.fetch(r.Filepath) + if err != nil { + return err + } + link := newMemFile(r.Target, false) + link.symlink = r.Filepath + fs.files[r.Target] = link + } + return nil +} + +type listerat []os.FileInfo + +// Modeled after strings.Reader's ReadAt() implementation +func (f listerat) ListAt(ls []os.FileInfo, offset int64) (int, error) { + var n int + if offset >= int64(len(f)) { + return 0, io.EOF + } + n = copy(ls, f[offset:]) + if n < len(ls) { + return n, io.EOF + } + return n, nil +} + +func (fs *root) Filelist(r *Request) (ListerAt, error) { + if fs.mockErr != nil { + return nil, fs.mockErr + } + fs.filesLock.Lock() + defer fs.filesLock.Unlock() + + switch r.Method { + case "List": + ordered_names := []string{} + for fn, _ := range fs.files { + if filepath.Dir(fn) == r.Filepath { + ordered_names = append(ordered_names, fn) + } + } + sort.Strings(ordered_names) + list := make([]os.FileInfo, len(ordered_names)) + for i, fn := range ordered_names { + list[i] = fs.files[fn] + } + return listerat(list), nil + case "Stat": + file, err := fs.fetch(r.Filepath) + if err != nil { + return nil, err + } + return listerat([]os.FileInfo{file}), nil + case "Readlink": + file, err := fs.fetch(r.Filepath) + if err != nil { + return nil, err + } + if file.symlink != "" { + file, err = fs.fetch(file.symlink) + if err != nil { + return nil, err + } + } + return listerat([]os.FileInfo{file}), nil + } + return nil, nil +} + +// In memory file-system-y thing that the Hanlders live on +type root struct { + *memFile + files 
map[string]*memFile + filesLock sync.Mutex + mockErr error +} + +// Set a mocked error that the next handler call will return. +// Set to nil to reset for no error. +func (fs *root) returnErr(err error) { + fs.mockErr = err +} + +func (fs *root) fetch(path string) (*memFile, error) { + if path == "/" { + return fs.memFile, nil + } + if file, ok := fs.files[path]; ok { + return file, nil + } + return nil, os.ErrNotExist +} + +// Implements os.FileInfo, Reader and Writer interfaces. +// These are the 3 interfaces necessary for the Handlers. +type memFile struct { + name string + modtime time.Time + symlink string + isdir bool + content []byte + contentLock sync.RWMutex +} + +// factory to make sure modtime is set +func newMemFile(name string, isdir bool) *memFile { + return &memFile{ + name: name, + modtime: time.Now(), + isdir: isdir, + } +} + +// Have memFile fulfill os.FileInfo interface +func (f *memFile) Name() string { return filepath.Base(f.name) } +func (f *memFile) Size() int64 { return int64(len(f.content)) } +func (f *memFile) Mode() os.FileMode { + ret := os.FileMode(0644) + if f.isdir { + ret = os.FileMode(0755) | os.ModeDir + } + if f.symlink != "" { + ret = os.FileMode(0777) | os.ModeSymlink + } + return ret +} +func (f *memFile) ModTime() time.Time { return f.modtime } +func (f *memFile) IsDir() bool { return f.isdir } +func (f *memFile) Sys() interface{} { + return fakeFileInfoSys() +} + +// Read/Write +func (f *memFile) ReaderAt() (io.ReaderAt, error) { + if f.isdir { + return nil, os.ErrInvalid + } + return bytes.NewReader(f.content), nil +} + +func (f *memFile) WriterAt() (io.WriterAt, error) { + if f.isdir { + return nil, os.ErrInvalid + } + return f, nil +} +func (f *memFile) WriteAt(p []byte, off int64) (int, error) { + // fmt.Println(string(p), off) + // mimic write delays, should be optional + time.Sleep(time.Microsecond * time.Duration(len(p))) + f.contentLock.Lock() + defer f.contentLock.Unlock() + plen := len(p) + int(off) + if plen >= len(f.content) { + nc := make([]byte, plen) + copy(nc, f.content) + f.content = nc + } + copy(f.content[off:], p) + return len(p), nil +} diff --git a/vendor/github.com/pkg/sftp/request-interfaces.go b/vendor/github.com/pkg/sftp/request-interfaces.go new file mode 100644 index 00000000..44b8da10 --- /dev/null +++ b/vendor/github.com/pkg/sftp/request-interfaces.go @@ -0,0 +1,46 @@ +package sftp + +import ( + "io" + "os" +) + +// Interfaces are differentiated based on required returned values. +// All input arguments are to be pulled from Request (the only arg). + +// FileReader should return an io.ReaderAt for the filepath +// Note in cases of an error, the error text will be sent to the client. +type FileReader interface { + Fileread(*Request) (io.ReaderAt, error) +} + +// FileWriter should return an io.WriterAt for the filepath. +// +// The request server code will call Close() on the returned io.WriterAt +// ojbect if an io.Closer type assertion succeeds. +// Note in cases of an error, the error text will be sent to the client. +type FileWriter interface { + Filewrite(*Request) (io.WriterAt, error) +} + +// FileCmder should return an error +// Note in cases of an error, the error text will be sent to the client. +type FileCmder interface { + Filecmd(*Request) error +} + +// FileLister should return an object that fulfils the ListerAt interface +// Note in cases of an error, the error text will be sent to the client. 
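+// Editor's sketch of a minimal implementation (illustrative names only;
+// "myFS" and "readDir" are assumptions, and a slice-backed ListerAt such
+// as the listerat type in request-example.go can be returned):
+//
+//	func (fs *myFS) Filelist(r *Request) (ListerAt, error) {
+//		infos, err := fs.readDir(r.Filepath)
+//		if err != nil {
+//			return nil, err
+//		}
+//		return listerat(infos), nil
+//	}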
+type FileLister interface { + Filelist(*Request) (ListerAt, error) +} + +// ListerAt does for file lists what io.ReaderAt does for files. +// ListAt should return the number of entries copied and an io.EOF +// error if at end of list. This is testable by comparing how many you +// copied to how many could be copied (eg. n < len(ls) below). +// The copy() builtin is best for the copying. +// Note in cases of an error, the error text will be sent to the client. +type ListerAt interface { + ListAt([]os.FileInfo, int64) (int, error) +} diff --git a/vendor/github.com/pkg/sftp/request-server.go b/vendor/github.com/pkg/sftp/request-server.go new file mode 100644 index 00000000..ec64a1ff --- /dev/null +++ b/vendor/github.com/pkg/sftp/request-server.go @@ -0,0 +1,238 @@ +package sftp + +import ( + "context" + "encoding" + "io" + "path" + "path/filepath" + "strconv" + "sync" + "syscall" + + "github.com/pkg/errors" +) + +var maxTxPacket uint32 = 1 << 15 + +// Handlers contains the 4 SFTP server request handlers. +type Handlers struct { + FileGet FileReader + FilePut FileWriter + FileCmd FileCmder + FileList FileLister +} + +// RequestServer abstracts the sftp protocol with an http request-like protocol +type RequestServer struct { + *serverConn + Handlers Handlers + pktMgr *packetManager + openRequests map[string]*Request + openRequestLock sync.RWMutex + handleCount int +} + +// NewRequestServer creates/allocates/returns new RequestServer. +// Normally there there will be one server per user-session. +func NewRequestServer(rwc io.ReadWriteCloser, h Handlers) *RequestServer { + svrConn := &serverConn{ + conn: conn{ + Reader: rwc, + WriteCloser: rwc, + }, + } + return &RequestServer{ + serverConn: svrConn, + Handlers: h, + pktMgr: newPktMgr(svrConn), + openRequests: make(map[string]*Request), + } +} + +// New Open packet/Request +func (rs *RequestServer) nextRequest(r *Request) string { + rs.openRequestLock.Lock() + defer rs.openRequestLock.Unlock() + rs.handleCount++ + handle := strconv.Itoa(rs.handleCount) + rs.openRequests[handle] = r + return handle +} + +// Returns Request from openRequests, bool is false if it is missing +// If the method is different, save/return a new Request w/ that Method. +// +// The Requests in openRequests work essentially as open file descriptors that +// you can do different things with. What you are doing with it are denoted by +// the first packet of that type (read/write/etc). We create a new Request when +// it changes to set the request.Method attribute in a thread safe way. 
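+// Editor's illustration (an inference from the comment above, not upstream
+// documentation): if a client opens a handle, writes to it, then reads it
+// back, the stored Request is copied when the packet type changes, so the
+// write packets and the read packets each see a Request whose Method
+// matches that operation while sharing the same handle.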
+func (rs *RequestServer) getRequest(handle, method string) (*Request, bool) { + rs.openRequestLock.RLock() + r, ok := rs.openRequests[handle] + rs.openRequestLock.RUnlock() + if !ok || r.Method == method { + return r, ok + } + // if we make it here we need to replace the request + rs.openRequestLock.Lock() + defer rs.openRequestLock.Unlock() + r, ok = rs.openRequests[handle] + if !ok || r.Method == method { // re-check needed b/c lock race + return r, ok + } + r = r.copy() + r.Method = method + rs.openRequests[handle] = r + return r, ok +} + +func (rs *RequestServer) closeRequest(handle string) error { + rs.openRequestLock.Lock() + defer rs.openRequestLock.Unlock() + if r, ok := rs.openRequests[handle]; ok { + delete(rs.openRequests, handle) + return r.close() + } + return syscall.EBADF +} + +// Close the read/write/closer to trigger exiting the main server loop +func (rs *RequestServer) Close() error { return rs.conn.Close() } + +// Serve requests for user session +func (rs *RequestServer) Serve() error { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var wg sync.WaitGroup + runWorker := func(ch requestChan) { + wg.Add(1) + go func() { + defer wg.Done() + if err := rs.packetWorker(ctx, ch); err != nil { + rs.conn.Close() // shuts down recvPacket + } + }() + } + pktChan := rs.pktMgr.workerChan(runWorker) + + var err error + var pkt requestPacket + var pktType uint8 + var pktBytes []byte + for { + pktType, pktBytes, err = rs.recvPacket() + if err != nil { + break + } + + pkt, err = makePacket(rxPacket{fxp(pktType), pktBytes}) + if err != nil { + debug("makePacket err: %v", err) + rs.conn.Close() // shuts down recvPacket + break + } + + pktChan <- pkt + } + + close(pktChan) // shuts down sftpServerWorkers + wg.Wait() // wait for all workers to exit + + // make sure all open requests are properly closed + // (eg. possible on dropped connections, client crashes, etc.) 
+ for handle, req := range rs.openRequests { + delete(rs.openRequests, handle) + req.close() + } + + return err +} + +func (rs *RequestServer) packetWorker( + ctx context.Context, pktChan chan requestPacket, +) error { + for pkt := range pktChan { + var rpkt responsePacket + switch pkt := pkt.(type) { + case *sshFxInitPacket: + rpkt = sshFxVersionPacket{sftpProtocolVersion, nil} + case *sshFxpClosePacket: + handle := pkt.getHandle() + rpkt = statusFromError(pkt, rs.closeRequest(handle)) + case *sshFxpRealpathPacket: + rpkt = cleanPacketPath(pkt) + case *sshFxpOpendirPacket: + request := requestFromPacket(ctx, pkt) + handle := rs.nextRequest(request) + rpkt = sshFxpHandlePacket{pkt.id(), handle} + case *sshFxpOpenPacket: + request := requestFromPacket(ctx, pkt) + handle := rs.nextRequest(request) + rpkt = sshFxpHandlePacket{pkt.id(), handle} + if pkt.hasPflags(ssh_FXF_CREAT) { + if p := request.call(rs.Handlers, pkt); !isOk(p) { + rpkt = p // if error in write, return it + } + } + case hasHandle: + handle := pkt.getHandle() + request, ok := rs.getRequest(handle, requestMethod(pkt)) + if !ok { + rpkt = statusFromError(pkt, syscall.EBADF) + } else { + rpkt = request.call(rs.Handlers, pkt) + } + case hasPath: + request := requestFromPacket(ctx, pkt) + rpkt = request.call(rs.Handlers, pkt) + request.close() + default: + return errors.Errorf("unexpected packet type %T", pkt) + } + + err := rs.sendPacket(rpkt) + if err != nil { + return err + } + } + return nil +} + +// True is responsePacket is an OK status packet +func isOk(rpkt responsePacket) bool { + p, ok := rpkt.(sshFxpStatusPacket) + return ok && p.StatusError.Code == ssh_FX_OK +} + +// clean and return name packet for file +func cleanPacketPath(pkt *sshFxpRealpathPacket) responsePacket { + path := cleanPath(pkt.getPath()) + return &sshFxpNamePacket{ + ID: pkt.id(), + NameAttrs: []sshFxpNameAttr{{ + Name: path, + LongName: path, + Attrs: emptyFileStat, + }}, + } +} + +// Makes sure we have a clean POSIX (/) absolute path to work with +func cleanPath(p string) string { + p = filepath.ToSlash(p) + if !filepath.IsAbs(p) { + p = "/" + p + } + return path.Clean(p) +} + +// Wrap underlying connection methods to use packetManager +func (rs *RequestServer) sendPacket(m encoding.BinaryMarshaler) error { + if pkt, ok := m.(responsePacket); ok { + rs.pktMgr.readyPacket(pkt) + } else { + return errors.Errorf("unexpected packet type %T", m) + } + return nil +} diff --git a/vendor/github.com/pkg/sftp/request-unix.go b/vendor/github.com/pkg/sftp/request-unix.go new file mode 100644 index 00000000..a71a8980 --- /dev/null +++ b/vendor/github.com/pkg/sftp/request-unix.go @@ -0,0 +1,23 @@ +// +build !windows + +package sftp + +import ( + "errors" + "syscall" +) + +func fakeFileInfoSys() interface{} { + return &syscall.Stat_t{Uid: 65534, Gid: 65534} +} + +func testOsSys(sys interface{}) error { + fstat := sys.(*FileStat) + if fstat.UID != uint32(65534) { + return errors.New("Uid failed to match.") + } + if fstat.GID != uint32(65534) { + return errors.New("Gid failed to match:") + } + return nil +} diff --git a/vendor/github.com/pkg/sftp/request.go b/vendor/github.com/pkg/sftp/request.go new file mode 100644 index 00000000..d74d6fa3 --- /dev/null +++ b/vendor/github.com/pkg/sftp/request.go @@ -0,0 +1,353 @@ +package sftp + +import ( + "context" + "io" + "os" + "path" + "path/filepath" + "sync" + "syscall" + + "github.com/pkg/errors" +) + +// MaxFilelist is the max number of files to return in a readdir batch. 
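+// Editor's note (assumption about intended use, not upstream guidance): as
+// an exported package variable it can be raised before serving, e.g.
+//
+//	sftp.MaxFilelist = 512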
+var MaxFilelist int64 = 100 + +// Request contains the data and state for the incoming service request. +type Request struct { + // Get, Put, Setstat, Stat, Rename, Remove + // Rmdir, Mkdir, List, Readlink, Symlink + Method string + Filepath string + Flags uint32 + Attrs []byte // convert to sub-struct + Target string // for renames and sym-links + // reader/writer/readdir from handlers + state state + // context lasts duration of request + ctx context.Context + cancelCtx context.CancelFunc +} + +type state struct { + *sync.RWMutex + writerAt io.WriterAt + readerAt io.ReaderAt + listerAt ListerAt + lsoffset int64 +} + +// New Request initialized based on packet data +func requestFromPacket(ctx context.Context, pkt hasPath) *Request { + method := requestMethod(pkt) + request := NewRequest(method, pkt.getPath()) + request.ctx, request.cancelCtx = context.WithCancel(ctx) + + switch p := pkt.(type) { + case *sshFxpOpenPacket: + request.Flags = p.Pflags + case *sshFxpSetstatPacket: + request.Flags = p.Flags + request.Attrs = p.Attrs.([]byte) + case *sshFxpRenamePacket: + request.Target = cleanPath(p.Newpath) + case *sshFxpSymlinkPacket: + request.Target = cleanPath(p.Linkpath) + } + return request +} + +// NewRequest creates a new Request object. +func NewRequest(method, path string) *Request { + return &Request{Method: method, Filepath: cleanPath(path), + state: state{RWMutex: new(sync.RWMutex)}} +} + +// shallow copy of existing request +func (r *Request) copy() *Request { + r.state.Lock() + defer r.state.Unlock() + r2 := new(Request) + *r2 = *r + return r2 +} + +// Context returns the request's context. To change the context, +// use WithContext. +// +// The returned context is always non-nil; it defaults to the +// background context. +// +// For incoming server requests, the context is canceled when the +// request is complete or the client's connection closes. +func (r *Request) Context() context.Context { + if r.ctx != nil { + return r.ctx + } + return context.Background() +} + +// WithContext returns a copy of r with its context changed to ctx. +// The provided ctx must be non-nil. 
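+//
+// An illustrative use from a handler that wants a per-request deadline
+// (the timeout value is arbitrary, shown only as an example):
+//
+//	ctx, cancel := context.WithTimeout(r.Context(), 30*time.Second)
+//	defer cancel()
+//	r = r.WithContext(ctx)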
+func (r *Request) WithContext(ctx context.Context) *Request { + if ctx == nil { + panic("nil context") + } + r2 := r.copy() + r2.ctx = ctx + r2.cancelCtx = nil + return r2 +} + +// Returns current offset for file list +func (r *Request) lsNext() int64 { + r.state.RLock() + defer r.state.RUnlock() + return r.state.lsoffset +} + +// Increases next offset +func (r *Request) lsInc(offset int64) { + r.state.Lock() + defer r.state.Unlock() + r.state.lsoffset = r.state.lsoffset + offset +} + +// manage file read/write state +func (r *Request) setWriterState(wa io.WriterAt) { + r.state.Lock() + defer r.state.Unlock() + r.state.writerAt = wa +} +func (r *Request) setReaderState(ra io.ReaderAt) { + r.state.Lock() + defer r.state.Unlock() + r.state.readerAt = ra +} +func (r *Request) setListerState(la ListerAt) { + r.state.Lock() + defer r.state.Unlock() + r.state.listerAt = la +} + +func (r *Request) getWriter() io.WriterAt { + r.state.RLock() + defer r.state.RUnlock() + return r.state.writerAt +} + +func (r *Request) getReader() io.ReaderAt { + r.state.RLock() + defer r.state.RUnlock() + return r.state.readerAt +} + +func (r *Request) getLister() ListerAt { + r.state.RLock() + defer r.state.RUnlock() + return r.state.listerAt +} + +// Close reader/writer if possible +func (r *Request) close() error { + if r.cancelCtx != nil { + r.cancelCtx() + } + rd := r.getReader() + if c, ok := rd.(io.Closer); ok { + return c.Close() + } + wt := r.getWriter() + if c, ok := wt.(io.Closer); ok { + return c.Close() + } + return nil +} + +// called from worker to handle packet/request +func (r *Request) call(handlers Handlers, pkt requestPacket) responsePacket { + switch r.Method { + case "Get": + return fileget(handlers.FileGet, r, pkt) + case "Put", "Open": + return fileput(handlers.FilePut, r, pkt) + case "Setstat", "Rename", "Rmdir", "Mkdir", "Symlink", "Remove": + return filecmd(handlers.FileCmd, r, pkt) + case "List", "Stat", "Readlink": + return filelist(handlers.FileList, r, pkt) + default: + return statusFromError(pkt, + errors.Errorf("unexpected method: %s", r.Method)) + } +} + +// file data for additional read/write packets +func packetData(p requestPacket) (data []byte, offset int64, length uint32) { + switch p := p.(type) { + case *sshFxpReadPacket: + length = p.Len + offset = int64(p.Offset) + case *sshFxpWritePacket: + data = p.Data + length = p.Length + offset = int64(p.Offset) + } + return +} + +// wrap FileReader handler +func fileget(h FileReader, r *Request, pkt requestPacket) responsePacket { + var err error + reader := r.getReader() + if reader == nil { + reader, err = h.Fileread(r) + if err != nil { + return statusFromError(pkt, err) + } + r.setReaderState(reader) + } + + _, offset, length := packetData(pkt) + data := make([]byte, clamp(length, maxTxPacket)) + n, err := reader.ReadAt(data, offset) + // only return EOF erro if no data left to read + if err != nil && (err != io.EOF || n == 0) { + return statusFromError(pkt, err) + } + return &sshFxpDataPacket{ + ID: pkt.id(), + Length: uint32(n), + Data: data[:n], + } +} + +// wrap FileWriter handler +func fileput(h FileWriter, r *Request, pkt requestPacket) responsePacket { + var err error + writer := r.getWriter() + if writer == nil { + writer, err = h.Filewrite(r) + if err != nil { + return statusFromError(pkt, err) + } + r.setWriterState(writer) + } + + data, offset, _ := packetData(pkt) + _, err = writer.WriteAt(data, offset) + return statusFromError(pkt, err) +} + +// wrap FileCmder handler +func filecmd(h FileCmder, r *Request, pkt 
requestPacket) responsePacket { + err := h.Filecmd(r) + return statusFromError(pkt, err) +} + +// wrap FileLister handler +func filelist(h FileLister, r *Request, pkt requestPacket) responsePacket { + var err error + lister := r.getLister() + if lister == nil { + lister, err = h.Filelist(r) + if err != nil { + return statusFromError(pkt, err) + } + r.setListerState(lister) + } + + offset := r.lsNext() + finfo := make([]os.FileInfo, MaxFilelist) + n, err := lister.ListAt(finfo, offset) + r.lsInc(int64(n)) + // ignore EOF as we only return it when there are no results + finfo = finfo[:n] // avoid need for nil tests below + + switch r.Method { + case "List": + if err != nil && err != io.EOF { + return statusFromError(pkt, err) + } + if n == 0 { + return statusFromError(pkt, io.EOF) + } + dirname := filepath.ToSlash(path.Base(r.Filepath)) + ret := &sshFxpNamePacket{ID: pkt.id()} + + for _, fi := range finfo { + ret.NameAttrs = append(ret.NameAttrs, sshFxpNameAttr{ + Name: fi.Name(), + LongName: runLs(dirname, fi), + Attrs: []interface{}{fi}, + }) + } + return ret + case "Stat": + if err != nil && err != io.EOF { + return statusFromError(pkt, err) + } + if n == 0 { + err = &os.PathError{Op: "stat", Path: r.Filepath, + Err: syscall.ENOENT} + return statusFromError(pkt, err) + } + return &sshFxpStatResponse{ + ID: pkt.id(), + info: finfo[0], + } + case "Readlink": + if err != nil && err != io.EOF { + return statusFromError(pkt, err) + } + if n == 0 { + err = &os.PathError{Op: "readlink", Path: r.Filepath, + Err: syscall.ENOENT} + return statusFromError(pkt, err) + } + filename := finfo[0].Name() + return &sshFxpNamePacket{ + ID: pkt.id(), + NameAttrs: []sshFxpNameAttr{{ + Name: filename, + LongName: filename, + Attrs: emptyFileStat, + }}, + } + default: + err = errors.Errorf("unexpected method: %s", r.Method) + return statusFromError(pkt, err) + } +} + +// init attributes of request object from packet data +func requestMethod(p requestPacket) (method string) { + switch p.(type) { + case *sshFxpReadPacket: + method = "Get" + case *sshFxpWritePacket: + method = "Put" + case *sshFxpReaddirPacket: + method = "List" + case *sshFxpOpenPacket, *sshFxpOpendirPacket: + method = "Open" + case *sshFxpSetstatPacket, *sshFxpFsetstatPacket: + method = "Setstat" + case *sshFxpRenamePacket: + method = "Rename" + case *sshFxpSymlinkPacket: + method = "Symlink" + case *sshFxpRemovePacket: + method = "Remove" + case *sshFxpStatPacket, *sshFxpLstatPacket, *sshFxpFstatPacket: + method = "Stat" + case *sshFxpRmdirPacket: + method = "Rmdir" + case *sshFxpReadlinkPacket: + method = "Readlink" + case *sshFxpMkdirPacket: + method = "Mkdir" + } + return method +} diff --git a/vendor/github.com/pkg/sftp/request_windows.go b/vendor/github.com/pkg/sftp/request_windows.go new file mode 100644 index 00000000..94d306b6 --- /dev/null +++ b/vendor/github.com/pkg/sftp/request_windows.go @@ -0,0 +1,11 @@ +package sftp + +import "syscall" + +func fakeFileInfoSys() interface{} { + return syscall.Win32FileAttributeData{} +} + +func testOsSys(sys interface{}) error { + return nil +} diff --git a/vendor/github.com/pkg/sftp/server.go b/vendor/github.com/pkg/sftp/server.go new file mode 100644 index 00000000..16678d1f --- /dev/null +++ b/vendor/github.com/pkg/sftp/server.go @@ -0,0 +1,675 @@ +package sftp + +// sftp server counterpart + +import ( + "encoding" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "sync" + "syscall" + "time" + + "github.com/pkg/errors" +) + +const ( + SftpServerWorkerCount = 8 +) + +// Server 
is an SSH File Transfer Protocol (sftp) server. +// This is intended to provide the sftp subsystem to an ssh server daemon. +// This implementation currently supports most of sftp server protocol version 3, +// as specified at http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02 +type Server struct { + *serverConn + debugStream io.Writer + readOnly bool + pktMgr *packetManager + openFiles map[string]*os.File + openFilesLock sync.RWMutex + handleCount int + maxTxPacket uint32 +} + +func (svr *Server) nextHandle(f *os.File) string { + svr.openFilesLock.Lock() + defer svr.openFilesLock.Unlock() + svr.handleCount++ + handle := strconv.Itoa(svr.handleCount) + svr.openFiles[handle] = f + return handle +} + +func (svr *Server) closeHandle(handle string) error { + svr.openFilesLock.Lock() + defer svr.openFilesLock.Unlock() + if f, ok := svr.openFiles[handle]; ok { + delete(svr.openFiles, handle) + return f.Close() + } + + return syscall.EBADF +} + +func (svr *Server) getHandle(handle string) (*os.File, bool) { + svr.openFilesLock.RLock() + defer svr.openFilesLock.RUnlock() + f, ok := svr.openFiles[handle] + return f, ok +} + +type serverRespondablePacket interface { + encoding.BinaryUnmarshaler + id() uint32 + respond(svr *Server) error +} + +// NewServer creates a new Server instance around the provided streams, serving +// content from the root of the filesystem. Optionally, ServerOption +// functions may be specified to further configure the Server. +// +// A subsequent call to Serve() is required to begin serving files over SFTP. +func NewServer(rwc io.ReadWriteCloser, options ...ServerOption) (*Server, error) { + svrConn := &serverConn{ + conn: conn{ + Reader: rwc, + WriteCloser: rwc, + }, + } + s := &Server{ + serverConn: svrConn, + debugStream: ioutil.Discard, + pktMgr: newPktMgr(svrConn), + openFiles: make(map[string]*os.File), + maxTxPacket: 1 << 15, + } + + for _, o := range options { + if err := o(s); err != nil { + return nil, err + } + } + + return s, nil +} + +// A ServerOption is a function which applies configuration to a Server. +type ServerOption func(*Server) error + +// WithDebug enables Server debugging output to the supplied io.Writer. +func WithDebug(w io.Writer) ServerOption { + return func(s *Server) error { + s.debugStream = w + return nil + } +} + +// ReadOnly configures a Server to serve files in read-only mode. 
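+//
+// For example, a read-only server over some io.ReadWriteCloser rwc
+// (illustrative only):
+//
+//	s, err := sftp.NewServer(rwc, sftp.ReadOnly(), sftp.WithDebug(os.Stderr))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if err := s.Serve(); err != nil {
+//		log.Println("sftp server finished:", err)
+//	}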
+func ReadOnly() ServerOption { + return func(s *Server) error { + s.readOnly = true + return nil + } +} + +type rxPacket struct { + pktType fxp + pktBytes []byte +} + +// Up to N parallel servers +func (svr *Server) sftpServerWorker(pktChan chan requestPacket) error { + for pkt := range pktChan { + + // readonly checks + readonly := true + switch pkt := pkt.(type) { + case notReadOnly: + readonly = false + case *sshFxpOpenPacket: + readonly = pkt.readonly() + case *sshFxpExtendedPacket: + readonly = pkt.SpecificPacket.readonly() + } + + // If server is operating read-only and a write operation is requested, + // return permission denied + if !readonly && svr.readOnly { + if err := svr.sendError(pkt, syscall.EPERM); err != nil { + return errors.Wrap(err, "failed to send read only packet response") + } + continue + } + + if err := handlePacket(svr, pkt); err != nil { + return err + } + } + return nil +} + +func handlePacket(s *Server, p interface{}) error { + switch p := p.(type) { + case *sshFxInitPacket: + return s.sendPacket(sshFxVersionPacket{sftpProtocolVersion, nil}) + case *sshFxpStatPacket: + // stat the requested file + info, err := os.Stat(p.Path) + if err != nil { + return s.sendError(p, err) + } + return s.sendPacket(sshFxpStatResponse{ + ID: p.ID, + info: info, + }) + case *sshFxpLstatPacket: + // stat the requested file + info, err := os.Lstat(p.Path) + if err != nil { + return s.sendError(p, err) + } + return s.sendPacket(sshFxpStatResponse{ + ID: p.ID, + info: info, + }) + case *sshFxpFstatPacket: + f, ok := s.getHandle(p.Handle) + if !ok { + return s.sendError(p, syscall.EBADF) + } + + info, err := f.Stat() + if err != nil { + return s.sendError(p, err) + } + + return s.sendPacket(sshFxpStatResponse{ + ID: p.ID, + info: info, + }) + case *sshFxpMkdirPacket: + // TODO FIXME: ignore flags field + err := os.Mkdir(p.Path, 0755) + return s.sendError(p, err) + case *sshFxpRmdirPacket: + err := os.Remove(p.Path) + return s.sendError(p, err) + case *sshFxpRemovePacket: + err := os.Remove(p.Filename) + return s.sendError(p, err) + case *sshFxpRenamePacket: + err := os.Rename(p.Oldpath, p.Newpath) + return s.sendError(p, err) + case *sshFxpSymlinkPacket: + err := os.Symlink(p.Targetpath, p.Linkpath) + return s.sendError(p, err) + case *sshFxpClosePacket: + return s.sendError(p, s.closeHandle(p.Handle)) + case *sshFxpReadlinkPacket: + f, err := os.Readlink(p.Path) + if err != nil { + return s.sendError(p, err) + } + + return s.sendPacket(sshFxpNamePacket{ + ID: p.ID, + NameAttrs: []sshFxpNameAttr{{ + Name: f, + LongName: f, + Attrs: emptyFileStat, + }}, + }) + + case *sshFxpRealpathPacket: + f, err := filepath.Abs(p.Path) + if err != nil { + return s.sendError(p, err) + } + f = cleanPath(f) + return s.sendPacket(sshFxpNamePacket{ + ID: p.ID, + NameAttrs: []sshFxpNameAttr{{ + Name: f, + LongName: f, + Attrs: emptyFileStat, + }}, + }) + case *sshFxpOpendirPacket: + return sshFxpOpenPacket{ + ID: p.ID, + Path: p.Path, + Pflags: ssh_FXF_READ, + }.respond(s) + case *sshFxpReadPacket: + f, ok := s.getHandle(p.Handle) + if !ok { + return s.sendError(p, syscall.EBADF) + } + + data := make([]byte, clamp(p.Len, s.maxTxPacket)) + n, err := f.ReadAt(data, int64(p.Offset)) + if err != nil && (err != io.EOF || n == 0) { + return s.sendError(p, err) + } + return s.sendPacket(sshFxpDataPacket{ + ID: p.ID, + Length: uint32(n), + Data: data[:n], + }) + case *sshFxpWritePacket: + f, ok := s.getHandle(p.Handle) + if !ok { + return s.sendError(p, syscall.EBADF) + } + + _, err := f.WriteAt(p.Data, 
int64(p.Offset)) + return s.sendError(p, err) + case serverRespondablePacket: + err := p.respond(s) + return errors.Wrap(err, "pkt.respond failed") + default: + return errors.Errorf("unexpected packet type %T", p) + } +} + +// Serve serves SFTP connections until the streams stop or the SFTP subsystem +// is stopped. +func (svr *Server) Serve() error { + var wg sync.WaitGroup + runWorker := func(ch requestChan) { + wg.Add(1) + go func() { + defer wg.Done() + if err := svr.sftpServerWorker(ch); err != nil { + svr.conn.Close() // shuts down recvPacket + } + }() + } + pktChan := svr.pktMgr.workerChan(runWorker) + + var err error + var pkt requestPacket + var pktType uint8 + var pktBytes []byte + for { + pktType, pktBytes, err = svr.recvPacket() + if err != nil { + break + } + + pkt, err = makePacket(rxPacket{fxp(pktType), pktBytes}) + if err != nil { + debug("makePacket err: %v", err) + svr.conn.Close() // shuts down recvPacket + break + } + + pktChan <- pkt + } + + close(pktChan) // shuts down sftpServerWorkers + wg.Wait() // wait for all workers to exit + + // close any still-open files + for handle, file := range svr.openFiles { + fmt.Fprintf(svr.debugStream, "sftp server file with handle %q left open: %v\n", handle, file.Name()) + file.Close() + } + return err // error from recvPacket +} + +// Wrap underlying connection methods to use packetManager +func (svr *Server) sendPacket(m encoding.BinaryMarshaler) error { + if pkt, ok := m.(responsePacket); ok { + svr.pktMgr.readyPacket(pkt) + } else { + return errors.Errorf("unexpected packet type %T", m) + } + return nil +} + +func (svr *Server) sendError(p ider, err error) error { + return svr.sendPacket(statusFromError(p, err)) +} + +type ider interface { + id() uint32 +} + +// The init packet has no ID, so we just return a zero-value ID +func (p sshFxInitPacket) id() uint32 { return 0 } + +type sshFxpStatResponse struct { + ID uint32 + info os.FileInfo +} + +func (p sshFxpStatResponse) MarshalBinary() ([]byte, error) { + b := []byte{ssh_FXP_ATTRS} + b = marshalUint32(b, p.ID) + b = marshalFileInfo(b, p.info) + return b, nil +} + +var emptyFileStat = []interface{}{uint32(0)} + +func (p sshFxpOpenPacket) readonly() bool { + return !p.hasPflags(ssh_FXF_WRITE) +} + +func (p sshFxpOpenPacket) hasPflags(flags ...uint32) bool { + for _, f := range flags { + if p.Pflags&f == 0 { + return false + } + } + return true +} + +func (p sshFxpOpenPacket) respond(svr *Server) error { + var osFlags int + if p.hasPflags(ssh_FXF_READ, ssh_FXF_WRITE) { + osFlags |= os.O_RDWR + } else if p.hasPflags(ssh_FXF_WRITE) { + osFlags |= os.O_WRONLY + } else if p.hasPflags(ssh_FXF_READ) { + osFlags |= os.O_RDONLY + } else { + // how are they opening? 
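+		// neither ssh_FXF_READ nor ssh_FXF_WRITE was requested, so there is
+		// no sensible open mode; reject the open as invalid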
+ return svr.sendError(p, syscall.EINVAL) + } + + if p.hasPflags(ssh_FXF_APPEND) { + osFlags |= os.O_APPEND + } + if p.hasPflags(ssh_FXF_CREAT) { + osFlags |= os.O_CREATE + } + if p.hasPflags(ssh_FXF_TRUNC) { + osFlags |= os.O_TRUNC + } + if p.hasPflags(ssh_FXF_EXCL) { + osFlags |= os.O_EXCL + } + + f, err := os.OpenFile(p.Path, osFlags, 0644) + if err != nil { + return svr.sendError(p, err) + } + + handle := svr.nextHandle(f) + return svr.sendPacket(sshFxpHandlePacket{p.ID, handle}) +} + +func (p sshFxpReaddirPacket) respond(svr *Server) error { + f, ok := svr.getHandle(p.Handle) + if !ok { + return svr.sendError(p, syscall.EBADF) + } + + dirname := f.Name() + dirents, err := f.Readdir(128) + if err != nil { + return svr.sendError(p, err) + } + + ret := sshFxpNamePacket{ID: p.ID} + for _, dirent := range dirents { + ret.NameAttrs = append(ret.NameAttrs, sshFxpNameAttr{ + Name: dirent.Name(), + LongName: runLs(dirname, dirent), + Attrs: []interface{}{dirent}, + }) + } + return svr.sendPacket(ret) +} + +func (p sshFxpSetstatPacket) respond(svr *Server) error { + // additional unmarshalling is required for each possibility here + b := p.Attrs.([]byte) + var err error + + debug("setstat name \"%s\"", p.Path) + if (p.Flags & ssh_FILEXFER_ATTR_SIZE) != 0 { + var size uint64 + if size, b, err = unmarshalUint64Safe(b); err == nil { + err = os.Truncate(p.Path, int64(size)) + } + } + if (p.Flags & ssh_FILEXFER_ATTR_PERMISSIONS) != 0 { + var mode uint32 + if mode, b, err = unmarshalUint32Safe(b); err == nil { + err = os.Chmod(p.Path, os.FileMode(mode)) + } + } + if (p.Flags & ssh_FILEXFER_ATTR_ACMODTIME) != 0 { + var atime uint32 + var mtime uint32 + if atime, b, err = unmarshalUint32Safe(b); err != nil { + } else if mtime, b, err = unmarshalUint32Safe(b); err != nil { + } else { + atimeT := time.Unix(int64(atime), 0) + mtimeT := time.Unix(int64(mtime), 0) + err = os.Chtimes(p.Path, atimeT, mtimeT) + } + } + if (p.Flags & ssh_FILEXFER_ATTR_UIDGID) != 0 { + var uid uint32 + var gid uint32 + if uid, b, err = unmarshalUint32Safe(b); err != nil { + } else if gid, _, err = unmarshalUint32Safe(b); err != nil { + } else { + err = os.Chown(p.Path, int(uid), int(gid)) + } + } + + return svr.sendError(p, err) +} + +func (p sshFxpFsetstatPacket) respond(svr *Server) error { + f, ok := svr.getHandle(p.Handle) + if !ok { + return svr.sendError(p, syscall.EBADF) + } + + // additional unmarshalling is required for each possibility here + b := p.Attrs.([]byte) + var err error + + debug("fsetstat name \"%s\"", f.Name()) + if (p.Flags & ssh_FILEXFER_ATTR_SIZE) != 0 { + var size uint64 + if size, b, err = unmarshalUint64Safe(b); err == nil { + err = f.Truncate(int64(size)) + } + } + if (p.Flags & ssh_FILEXFER_ATTR_PERMISSIONS) != 0 { + var mode uint32 + if mode, b, err = unmarshalUint32Safe(b); err == nil { + err = f.Chmod(os.FileMode(mode)) + } + } + if (p.Flags & ssh_FILEXFER_ATTR_ACMODTIME) != 0 { + var atime uint32 + var mtime uint32 + if atime, b, err = unmarshalUint32Safe(b); err != nil { + } else if mtime, b, err = unmarshalUint32Safe(b); err != nil { + } else { + atimeT := time.Unix(int64(atime), 0) + mtimeT := time.Unix(int64(mtime), 0) + err = os.Chtimes(f.Name(), atimeT, mtimeT) + } + } + if (p.Flags & ssh_FILEXFER_ATTR_UIDGID) != 0 { + var uid uint32 + var gid uint32 + if uid, b, err = unmarshalUint32Safe(b); err != nil { + } else if gid, _, err = unmarshalUint32Safe(b); err != nil { + } else { + err = f.Chown(int(uid), int(gid)) + } + } + + return svr.sendError(p, err) +} + +// translateErrno translates a 
syscall error number to a SFTP error code. +func translateErrno(errno syscall.Errno) uint32 { + switch errno { + case 0: + return ssh_FX_OK + case syscall.ENOENT: + return ssh_FX_NO_SUCH_FILE + case syscall.EPERM: + return ssh_FX_PERMISSION_DENIED + } + + return ssh_FX_FAILURE +} + +func statusFromError(p ider, err error) sshFxpStatusPacket { + ret := sshFxpStatusPacket{ + ID: p.id(), + StatusError: StatusError{ + // ssh_FX_OK = 0 + // ssh_FX_EOF = 1 + // ssh_FX_NO_SUCH_FILE = 2 ENOENT + // ssh_FX_PERMISSION_DENIED = 3 + // ssh_FX_FAILURE = 4 + // ssh_FX_BAD_MESSAGE = 5 + // ssh_FX_NO_CONNECTION = 6 + // ssh_FX_CONNECTION_LOST = 7 + // ssh_FX_OP_UNSUPPORTED = 8 + Code: ssh_FX_OK, + }, + } + if err == nil { + return ret + } + + debug("statusFromError: error is %T %#v", err, err) + ret.StatusError.Code = ssh_FX_FAILURE + ret.StatusError.msg = err.Error() + + switch e := err.(type) { + case syscall.Errno: + ret.StatusError.Code = translateErrno(e) + case *os.PathError: + debug("statusFromError,pathError: error is %T %#v", e.Err, e.Err) + if errno, ok := e.Err.(syscall.Errno); ok { + ret.StatusError.Code = translateErrno(errno) + } + case fxerr: + ret.StatusError.Code = uint32(e) + default: + switch e { + case io.EOF: + ret.StatusError.Code = ssh_FX_EOF + case os.ErrNotExist: + ret.StatusError.Code = ssh_FX_NO_SUCH_FILE + } + } + + return ret +} + +func clamp(v, max uint32) uint32 { + if v > max { + return max + } + return v +} + +func runLsTypeWord(dirent os.FileInfo) string { + // find first character, the type char + // b Block special file. + // c Character special file. + // d Directory. + // l Symbolic link. + // s Socket link. + // p FIFO. + // - Regular file. + tc := '-' + mode := dirent.Mode() + if (mode & os.ModeDir) != 0 { + tc = 'd' + } else if (mode & os.ModeDevice) != 0 { + tc = 'b' + if (mode & os.ModeCharDevice) != 0 { + tc = 'c' + } + } else if (mode & os.ModeSymlink) != 0 { + tc = 'l' + } else if (mode & os.ModeSocket) != 0 { + tc = 's' + } else if (mode & os.ModeNamedPipe) != 0 { + tc = 'p' + } + + // owner + orc := '-' + if (mode & 0400) != 0 { + orc = 'r' + } + owc := '-' + if (mode & 0200) != 0 { + owc = 'w' + } + oxc := '-' + ox := (mode & 0100) != 0 + setuid := (mode & os.ModeSetuid) != 0 + if ox && setuid { + oxc = 's' + } else if setuid { + oxc = 'S' + } else if ox { + oxc = 'x' + } + + // group + grc := '-' + if (mode & 040) != 0 { + grc = 'r' + } + gwc := '-' + if (mode & 020) != 0 { + gwc = 'w' + } + gxc := '-' + gx := (mode & 010) != 0 + setgid := (mode & os.ModeSetgid) != 0 + if gx && setgid { + gxc = 's' + } else if setgid { + gxc = 'S' + } else if gx { + gxc = 'x' + } + + // all / others + arc := '-' + if (mode & 04) != 0 { + arc = 'r' + } + awc := '-' + if (mode & 02) != 0 { + awc = 'w' + } + axc := '-' + ax := (mode & 01) != 0 + sticky := (mode & os.ModeSticky) != 0 + if ax && sticky { + axc = 't' + } else if sticky { + axc = 'T' + } else if ax { + axc = 'x' + } + + return fmt.Sprintf("%c%c%c%c%c%c%c%c%c%c", tc, orc, owc, oxc, grc, gwc, gxc, arc, awc, axc) +} diff --git a/vendor/github.com/pkg/sftp/server_standalone/main.go b/vendor/github.com/pkg/sftp/server_standalone/main.go new file mode 100644 index 00000000..0b8e102a --- /dev/null +++ b/vendor/github.com/pkg/sftp/server_standalone/main.go @@ -0,0 +1,52 @@ +package main + +// small wrapper around sftp server that allows it to be used as a separate process subsystem call by the ssh server. +// in practice this will statically link; however this allows unit testing from the sftp client. 
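+//
+// a typical sshd_config entry wiring this binary in as the sftp subsystem
+// might look like the following (the install path is illustrative):
+//
+//	Subsystem sftp /usr/lib/sftp-server-standalone -e -l none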
+ +import ( + "flag" + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/pkg/sftp" +) + +func main() { + var ( + readOnly bool + debugStderr bool + debugLevel string + options []sftp.ServerOption + ) + + flag.BoolVar(&readOnly, "R", false, "read-only server") + flag.BoolVar(&debugStderr, "e", false, "debug to stderr") + flag.StringVar(&debugLevel, "l", "none", "debug level (ignored)") + flag.Parse() + + debugStream := ioutil.Discard + if debugStderr { + debugStream = os.Stderr + } + options = append(options, sftp.WithDebug(debugStream)) + + if readOnly { + options = append(options, sftp.ReadOnly()) + } + + svr, _ := sftp.NewServer( + struct { + io.Reader + io.WriteCloser + }{os.Stdin, + os.Stdout, + }, + options..., + ) + if err := svr.Serve(); err != nil { + fmt.Fprintf(debugStream, "sftp server completed with error: %v", err) + os.Exit(1) + } +} diff --git a/vendor/github.com/pkg/sftp/server_statvfs_darwin.go b/vendor/github.com/pkg/sftp/server_statvfs_darwin.go new file mode 100644 index 00000000..8c01dac5 --- /dev/null +++ b/vendor/github.com/pkg/sftp/server_statvfs_darwin.go @@ -0,0 +1,21 @@ +package sftp + +import ( + "syscall" +) + +func statvfsFromStatfst(stat *syscall.Statfs_t) (*StatVFS, error) { + return &StatVFS{ + Bsize: uint64(stat.Bsize), + Frsize: uint64(stat.Bsize), // fragment size is a linux thing; use block size here + Blocks: stat.Blocks, + Bfree: stat.Bfree, + Bavail: stat.Bavail, + Files: stat.Files, + Ffree: stat.Ffree, + Favail: stat.Ffree, // not sure how to calculate Favail + Fsid: uint64(uint64(stat.Fsid.Val[1])<<32 | uint64(stat.Fsid.Val[0])), // endianness? + Flag: uint64(stat.Flags), // assuming POSIX? + Namemax: 1024, // man 2 statfs shows: #define MAXPATHLEN 1024 + }, nil +} diff --git a/vendor/github.com/pkg/sftp/server_statvfs_impl.go b/vendor/github.com/pkg/sftp/server_statvfs_impl.go new file mode 100644 index 00000000..c37a34ac --- /dev/null +++ b/vendor/github.com/pkg/sftp/server_statvfs_impl.go @@ -0,0 +1,25 @@ +// +build darwin linux + +// fill in statvfs structure with OS specific values +// Statfs_t is different per-kernel, and only exists on some unixes (not Solaris for instance) + +package sftp + +import ( + "syscall" +) + +func (p sshFxpExtendedPacketStatVFS) respond(svr *Server) error { + stat := &syscall.Statfs_t{} + if err := syscall.Statfs(p.Path, stat); err != nil { + return svr.sendPacket(statusFromError(p, err)) + } + + retPkt, err := statvfsFromStatfst(stat) + if err != nil { + return svr.sendPacket(statusFromError(p, err)) + } + retPkt.ID = p.ID + + return svr.sendPacket(retPkt) +} diff --git a/vendor/github.com/pkg/sftp/server_statvfs_linux.go b/vendor/github.com/pkg/sftp/server_statvfs_linux.go new file mode 100644 index 00000000..1d180d47 --- /dev/null +++ b/vendor/github.com/pkg/sftp/server_statvfs_linux.go @@ -0,0 +1,22 @@ +// +build linux + +package sftp + +import ( + "syscall" +) + +func statvfsFromStatfst(stat *syscall.Statfs_t) (*StatVFS, error) { + return &StatVFS{ + Bsize: uint64(stat.Bsize), + Frsize: uint64(stat.Frsize), + Blocks: stat.Blocks, + Bfree: stat.Bfree, + Bavail: stat.Bavail, + Files: stat.Files, + Ffree: stat.Ffree, + Favail: stat.Ffree, // not sure how to calculate Favail + Flag: uint64(stat.Flags), // assuming POSIX? 
+ Namemax: uint64(stat.Namelen), + }, nil +} diff --git a/vendor/github.com/pkg/sftp/server_statvfs_stubs.go b/vendor/github.com/pkg/sftp/server_statvfs_stubs.go new file mode 100644 index 00000000..3fe40788 --- /dev/null +++ b/vendor/github.com/pkg/sftp/server_statvfs_stubs.go @@ -0,0 +1,11 @@ +// +build !darwin,!linux + +package sftp + +import ( + "syscall" +) + +func (p sshFxpExtendedPacketStatVFS) respond(svr *Server) error { + return syscall.ENOTSUP +} diff --git a/vendor/github.com/pkg/sftp/server_stubs.go b/vendor/github.com/pkg/sftp/server_stubs.go new file mode 100644 index 00000000..a14c7348 --- /dev/null +++ b/vendor/github.com/pkg/sftp/server_stubs.go @@ -0,0 +1,32 @@ +// +build !cgo,!plan9 windows android + +package sftp + +import ( + "os" + "time" + "fmt" +) + +func runLs(dirname string, dirent os.FileInfo) string { + typeword := runLsTypeWord(dirent) + numLinks := 1 + if dirent.IsDir() { + numLinks = 0 + } + username := "root" + groupname := "root" + mtime := dirent.ModTime() + monthStr := mtime.Month().String()[0:3] + day := mtime.Day() + year := mtime.Year() + now := time.Now() + isOld := mtime.Before(now.Add(-time.Hour * 24 * 365 / 2)) + + yearOrTime := fmt.Sprintf("%02d:%02d", mtime.Hour(), mtime.Minute()) + if isOld { + yearOrTime = fmt.Sprintf("%d", year) + } + + return fmt.Sprintf("%s %4d %-8s %-8s %8d %s %2d %5s %s", typeword, numLinks, username, groupname, dirent.Size(), monthStr, day, yearOrTime, dirent.Name()) +} diff --git a/vendor/github.com/pkg/sftp/server_unix.go b/vendor/github.com/pkg/sftp/server_unix.go new file mode 100644 index 00000000..8d0c138c --- /dev/null +++ b/vendor/github.com/pkg/sftp/server_unix.go @@ -0,0 +1,54 @@ +// +build darwin dragonfly freebsd !android,linux netbsd openbsd solaris +// +build cgo + +package sftp + +import ( + "fmt" + "os" + "path" + "syscall" + "time" +) + +func runLsStatt(dirent os.FileInfo, statt *syscall.Stat_t) string { + // example from openssh sftp server: + // crw-rw-rw- 1 root wheel 0 Jul 31 20:52 ttyvd + // format: + // {directory / char device / etc}{rwxrwxrwx} {number of links} owner group size month day [time (this year) | year (otherwise)] name + + typeword := runLsTypeWord(dirent) + numLinks := statt.Nlink + uid := statt.Uid + gid := statt.Gid + username := fmt.Sprintf("%d", uid) + groupname := fmt.Sprintf("%d", gid) + // TODO FIXME: uid -> username, gid -> groupname lookup for ls -l format output + + mtime := dirent.ModTime() + monthStr := mtime.Month().String()[0:3] + day := mtime.Day() + year := mtime.Year() + now := time.Now() + isOld := mtime.Before(now.Add(-time.Hour * 24 * 365 / 2)) + + yearOrTime := fmt.Sprintf("%02d:%02d", mtime.Hour(), mtime.Minute()) + if isOld { + yearOrTime = fmt.Sprintf("%d", year) + } + + return fmt.Sprintf("%s %4d %-8s %-8s %8d %s %2d %5s %s", typeword, numLinks, username, groupname, dirent.Size(), monthStr, day, yearOrTime, dirent.Name()) +} + +// ls -l style output for a file, which is in the 'long output' section of a readdir response packet +// this is a very simple (lazy) implementation, just enough to look almost like openssh in a few basic cases +func runLs(dirname string, dirent os.FileInfo) string { + dsys := dirent.Sys() + if dsys == nil { + } else if statt, ok := dsys.(*syscall.Stat_t); !ok { + } else { + return runLsStatt(dirent, statt) + } + + return path.Join(dirname, dirent.Name()) +} diff --git a/vendor/github.com/pkg/sftp/sftp.go b/vendor/github.com/pkg/sftp/sftp.go new file mode 100644 index 00000000..3cdb14df --- /dev/null +++ 
b/vendor/github.com/pkg/sftp/sftp.go @@ -0,0 +1,217 @@ +// Package sftp implements the SSH File Transfer Protocol as described in +// https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02 +package sftp + +import ( + "fmt" + + "github.com/pkg/errors" +) + +const ( + ssh_FXP_INIT = 1 + ssh_FXP_VERSION = 2 + ssh_FXP_OPEN = 3 + ssh_FXP_CLOSE = 4 + ssh_FXP_READ = 5 + ssh_FXP_WRITE = 6 + ssh_FXP_LSTAT = 7 + ssh_FXP_FSTAT = 8 + ssh_FXP_SETSTAT = 9 + ssh_FXP_FSETSTAT = 10 + ssh_FXP_OPENDIR = 11 + ssh_FXP_READDIR = 12 + ssh_FXP_REMOVE = 13 + ssh_FXP_MKDIR = 14 + ssh_FXP_RMDIR = 15 + ssh_FXP_REALPATH = 16 + ssh_FXP_STAT = 17 + ssh_FXP_RENAME = 18 + ssh_FXP_READLINK = 19 + ssh_FXP_SYMLINK = 20 + ssh_FXP_STATUS = 101 + ssh_FXP_HANDLE = 102 + ssh_FXP_DATA = 103 + ssh_FXP_NAME = 104 + ssh_FXP_ATTRS = 105 + ssh_FXP_EXTENDED = 200 + ssh_FXP_EXTENDED_REPLY = 201 +) + +const ( + ssh_FX_OK = 0 + ssh_FX_EOF = 1 + ssh_FX_NO_SUCH_FILE = 2 + ssh_FX_PERMISSION_DENIED = 3 + ssh_FX_FAILURE = 4 + ssh_FX_BAD_MESSAGE = 5 + ssh_FX_NO_CONNECTION = 6 + ssh_FX_CONNECTION_LOST = 7 + ssh_FX_OP_UNSUPPORTED = 8 + + // see draft-ietf-secsh-filexfer-13 + // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-13#section-9.1 + ssh_FX_INVALID_HANDLE = 9 + ssh_FX_NO_SUCH_PATH = 10 + ssh_FX_FILE_ALREADY_EXISTS = 11 + ssh_FX_WRITE_PROTECT = 12 + ssh_FX_NO_MEDIA = 13 + ssh_FX_NO_SPACE_ON_FILESYSTEM = 14 + ssh_FX_QUOTA_EXCEEDED = 15 + ssh_FX_UNKNOWN_PRINCIPAL = 16 + ssh_FX_LOCK_CONFLICT = 17 + ssh_FX_DIR_NOT_EMPTY = 18 + ssh_FX_NOT_A_DIRECTORY = 19 + ssh_FX_INVALID_FILENAME = 20 + ssh_FX_LINK_LOOP = 21 + ssh_FX_CANNOT_DELETE = 22 + ssh_FX_INVALID_PARAMETER = 23 + ssh_FX_FILE_IS_A_DIRECTORY = 24 + ssh_FX_BYTE_RANGE_LOCK_CONFLICT = 25 + ssh_FX_BYTE_RANGE_LOCK_REFUSED = 26 + ssh_FX_DELETE_PENDING = 27 + ssh_FX_FILE_CORRUPT = 28 + ssh_FX_OWNER_INVALID = 29 + ssh_FX_GROUP_INVALID = 30 + ssh_FX_NO_MATCHING_BYTE_RANGE_LOCK = 31 +) + +const ( + ssh_FXF_READ = 0x00000001 + ssh_FXF_WRITE = 0x00000002 + ssh_FXF_APPEND = 0x00000004 + ssh_FXF_CREAT = 0x00000008 + ssh_FXF_TRUNC = 0x00000010 + ssh_FXF_EXCL = 0x00000020 +) + +type fxp uint8 + +func (f fxp) String() string { + switch f { + case ssh_FXP_INIT: + return "SSH_FXP_INIT" + case ssh_FXP_VERSION: + return "SSH_FXP_VERSION" + case ssh_FXP_OPEN: + return "SSH_FXP_OPEN" + case ssh_FXP_CLOSE: + return "SSH_FXP_CLOSE" + case ssh_FXP_READ: + return "SSH_FXP_READ" + case ssh_FXP_WRITE: + return "SSH_FXP_WRITE" + case ssh_FXP_LSTAT: + return "SSH_FXP_LSTAT" + case ssh_FXP_FSTAT: + return "SSH_FXP_FSTAT" + case ssh_FXP_SETSTAT: + return "SSH_FXP_SETSTAT" + case ssh_FXP_FSETSTAT: + return "SSH_FXP_FSETSTAT" + case ssh_FXP_OPENDIR: + return "SSH_FXP_OPENDIR" + case ssh_FXP_READDIR: + return "SSH_FXP_READDIR" + case ssh_FXP_REMOVE: + return "SSH_FXP_REMOVE" + case ssh_FXP_MKDIR: + return "SSH_FXP_MKDIR" + case ssh_FXP_RMDIR: + return "SSH_FXP_RMDIR" + case ssh_FXP_REALPATH: + return "SSH_FXP_REALPATH" + case ssh_FXP_STAT: + return "SSH_FXP_STAT" + case ssh_FXP_RENAME: + return "SSH_FXP_RENAME" + case ssh_FXP_READLINK: + return "SSH_FXP_READLINK" + case ssh_FXP_SYMLINK: + return "SSH_FXP_SYMLINK" + case ssh_FXP_STATUS: + return "SSH_FXP_STATUS" + case ssh_FXP_HANDLE: + return "SSH_FXP_HANDLE" + case ssh_FXP_DATA: + return "SSH_FXP_DATA" + case ssh_FXP_NAME: + return "SSH_FXP_NAME" + case ssh_FXP_ATTRS: + return "SSH_FXP_ATTRS" + case ssh_FXP_EXTENDED: + return "SSH_FXP_EXTENDED" + case ssh_FXP_EXTENDED_REPLY: + return "SSH_FXP_EXTENDED_REPLY" + default: + return "unknown" + } +} + +type fx uint8 + 
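+// String renders an fx status code as its SSH_FX_* protocol name; codes not
+// covered by the switch below come back as "unknown". Illustrative use:
+//
+//	fmt.Println(fx(ssh_FX_PERMISSION_DENIED)) // prints "SSH_FX_PERMISSION_DENIED"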
+func (f fx) String() string { + switch f { + case ssh_FX_OK: + return "SSH_FX_OK" + case ssh_FX_EOF: + return "SSH_FX_EOF" + case ssh_FX_NO_SUCH_FILE: + return "SSH_FX_NO_SUCH_FILE" + case ssh_FX_PERMISSION_DENIED: + return "SSH_FX_PERMISSION_DENIED" + case ssh_FX_FAILURE: + return "SSH_FX_FAILURE" + case ssh_FX_BAD_MESSAGE: + return "SSH_FX_BAD_MESSAGE" + case ssh_FX_NO_CONNECTION: + return "SSH_FX_NO_CONNECTION" + case ssh_FX_CONNECTION_LOST: + return "SSH_FX_CONNECTION_LOST" + case ssh_FX_OP_UNSUPPORTED: + return "SSH_FX_OP_UNSUPPORTED" + default: + return "unknown" + } +} + +type unexpectedPacketErr struct { + want, got uint8 +} + +func (u *unexpectedPacketErr) Error() string { + return fmt.Sprintf("sftp: unexpected packet: want %v, got %v", fxp(u.want), fxp(u.got)) +} + +func unimplementedPacketErr(u uint8) error { + return errors.Errorf("sftp: unimplemented packet type: got %v", fxp(u)) +} + +type unexpectedIDErr struct{ want, got uint32 } + +func (u *unexpectedIDErr) Error() string { + return fmt.Sprintf("sftp: unexpected id: want %v, got %v", u.want, u.got) +} + +func unimplementedSeekWhence(whence int) error { + return errors.Errorf("sftp: unimplemented seek whence %v", whence) +} + +func unexpectedCount(want, got uint32) error { + return errors.Errorf("sftp: unexpected count: want %v, got %v", want, got) +} + +type unexpectedVersionErr struct{ want, got uint32 } + +func (u *unexpectedVersionErr) Error() string { + return fmt.Sprintf("sftp: unexpected server version: want %v, got %v", u.want, u.got) +} + +// A StatusError is returned when an SFTP operation fails, and provides +// additional information about the failure. +type StatusError struct { + Code uint32 + msg, lang string +} + +func (s *StatusError) Error() string { return fmt.Sprintf("sftp: %q (%v)", s.msg, fx(s.Code)) } |
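+
+// An illustrative sketch (not vendored code) of how a caller inside this
+// package might inspect a returned StatusError:
+//
+//	if se, ok := err.(*StatusError); ok && se.Code == ssh_FX_PERMISSION_DENIED {
+//		log.Printf("sftp: permission denied: %v", se)
+//	}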