Merge pull request #11853 from hashicorp/jbardin/ssh-panic
Update ssh and add failing test for long streams
commit 8271a739f2
@@ -292,8 +292,25 @@ func (c *Communicator) Upload(path string, input io.Reader) error {
 	// which works for unix and windows
 	targetDir = filepath.ToSlash(targetDir)
 
+	// Skip copying if we can get the file size directly from common io.Readers
+	size := int64(0)
+
+	switch src := input.(type) {
+	case *os.File:
+		fi, err := src.Stat()
+		if err != nil {
+			size = fi.Size()
+		}
+	case *bytes.Buffer:
+		size = int64(src.Len())
+	case *bytes.Reader:
+		size = int64(src.Len())
+	case *strings.Reader:
+		size = int64(src.Len())
+	}
+
 	scpFunc := func(w io.Writer, stdoutR *bufio.Reader) error {
-		return scpUploadFile(targetFile, input, w, stdoutR)
+		return scpUploadFile(targetFile, input, w, stdoutR, size)
 	}
 
 	return c.scpSession("scp -vt "+targetDir, scpFunc)
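The hunk above probes a few common io.Reader implementations for their length so the temporary-file copy in scpUploadFile can be skipped. One detail: the *os.File branch as it appears above assigns the size when src.Stat() reports an error, which reads inverted; presumably the size should only be used when Stat succeeds. A standalone sketch of the probing idea, using err == nil for that branch (an editorial assumption, not text from the commit):

    package main

    import (
        "bytes"
        "fmt"
        "io"
        "os"
        "strings"
    )

    // readerSize reports the length of a few common io.Reader types and 0
    // when the length is unknown. The *os.File branch only trusts the
    // FileInfo when Stat succeeds.
    func readerSize(input io.Reader) int64 {
        switch src := input.(type) {
        case *os.File:
            if fi, err := src.Stat(); err == nil {
                return fi.Size()
            }
        case *bytes.Buffer:
            return int64(src.Len())
        case *bytes.Reader:
            return int64(src.Len())
        case *strings.Reader:
            return int64(src.Len())
        }
        return 0
    }

    func main() {
        fmt.Println(readerSize(strings.NewReader("hello")))          // 5
        fmt.Println(readerSize(bytes.NewReader(make([]byte, 1024)))) // 1024
    }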
@@ -490,45 +507,50 @@ func checkSCPStatus(r *bufio.Reader) error {
 	return nil
 }
 
-func scpUploadFile(dst string, src io.Reader, w io.Writer, r *bufio.Reader) error {
-	// Create a temporary file where we can copy the contents of the src
-	// so that we can determine the length, since SCP is length-prefixed.
-	tf, err := ioutil.TempFile("", "terraform-upload")
-	if err != nil {
-		return fmt.Errorf("Error creating temporary file for upload: %s", err)
-	}
-	defer os.Remove(tf.Name())
-	defer tf.Close()
+func scpUploadFile(dst string, src io.Reader, w io.Writer, r *bufio.Reader, size int64) error {
+	if size == 0 {
+		// Create a temporary file where we can copy the contents of the src
+		// so that we can determine the length, since SCP is length-prefixed.
+		tf, err := ioutil.TempFile("", "terraform-upload")
+		if err != nil {
+			return fmt.Errorf("Error creating temporary file for upload: %s", err)
+		}
+		defer os.Remove(tf.Name())
+		defer tf.Close()
 
-	log.Println("Copying input data into temporary file so we can read the length")
-	if _, err := io.Copy(tf, src); err != nil {
-		return err
-	}
+		log.Println("Copying input data into temporary file so we can read the length")
+		if _, err := io.Copy(tf, src); err != nil {
+			return err
+		}
 
-	// Sync the file so that the contents are definitely on disk, then
-	// read the length of it.
-	if err := tf.Sync(); err != nil {
-		return fmt.Errorf("Error creating temporary file for upload: %s", err)
-	}
+		// Sync the file so that the contents are definitely on disk, then
+		// read the length of it.
+		if err := tf.Sync(); err != nil {
+			return fmt.Errorf("Error creating temporary file for upload: %s", err)
+		}
 
-	// Seek the file to the beginning so we can re-read all of it
-	if _, err := tf.Seek(0, 0); err != nil {
-		return fmt.Errorf("Error creating temporary file for upload: %s", err)
-	}
+		// Seek the file to the beginning so we can re-read all of it
+		if _, err := tf.Seek(0, 0); err != nil {
+			return fmt.Errorf("Error creating temporary file for upload: %s", err)
+		}
 
-	fi, err := tf.Stat()
-	if err != nil {
-		return fmt.Errorf("Error creating temporary file for upload: %s", err)
-	}
+		fi, err := tf.Stat()
+		if err != nil {
+			return fmt.Errorf("Error creating temporary file for upload: %s", err)
+		}
+
+		src = tf
+		size = fi.Size()
+	}
 
 	// Start the protocol
 	log.Println("Beginning file upload...")
-	fmt.Fprintln(w, "C0644", fi.Size(), dst)
+	fmt.Fprintln(w, "C0644", size, dst)
 	if err := checkSCPStatus(r); err != nil {
 		return err
 	}
 
-	if _, err := io.Copy(w, tf); err != nil {
+	if _, err := io.Copy(w, src); err != nil {
 		return err
 	}
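For context on why scpUploadFile needs a byte count up front: the scp sink protocol is length-prefixed, so the sender announces "C<mode> <size> <name>" before streaming exactly that many bytes, followed by a zero byte. A minimal sketch of that framing (the status-byte handshake with the remote scp process, which checkSCPStatus handles in the real code, is omitted):

    package main

    import (
        "bytes"
        "fmt"
    )

    // writeSCPFile emits one file in scp sink framing: a "C<mode> <size> <name>"
    // header, the file bytes, then a terminating zero byte.
    func writeSCPFile(w *bytes.Buffer, name string, data []byte) {
        fmt.Fprintln(w, "C0644", len(data), name) // length-prefixed header
        w.Write(data)                             // file contents
        w.WriteByte(0)                            // end-of-file marker
    }

    func main() {
        var buf bytes.Buffer
        writeSCPFile(&buf, "tempFile.out", []byte("this is the file content"))
        fmt.Printf("%q\n", buf.String())
    }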
@@ -592,7 +614,7 @@ func scpUploadDir(root string, fs []os.FileInfo, w io.Writer, r *bufio.Reader) e
 
 		err = func() error {
 			defer f.Close()
-			return scpUploadFile(fi.Name(), f, w, r)
+			return scpUploadFile(fi.Name(), f, w, r, fi.Size())
 		}()
 
 		if err != nil {
@@ -3,9 +3,15 @@
 package ssh
 
 import (
+	"bufio"
 	"bytes"
 	"fmt"
+	"io"
+	"io/ioutil"
+	"math/rand"
 	"net"
+	"os"
+	"path/filepath"
 	"regexp"
 	"strings"
 	"testing"
@@ -165,6 +171,117 @@ func TestStart(t *testing.T) {
 	}
 }
 
+func TestAccUploadFile(t *testing.T) {
+	// use the local ssh server and scp binary to check uploads
+	if ok := os.Getenv("SSH_UPLOAD_TEST"); ok == "" {
+		t.Log("Skipping Upload Acceptance without SSH_UPLOAD_TEST set")
+		t.Skip()
+	}
+
+	r := &terraform.InstanceState{
+		Ephemeral: terraform.EphemeralState{
+			ConnInfo: map[string]string{
+				"type":    "ssh",
+				"user":    os.Getenv("USER"),
+				"host":    "127.0.0.1",
+				"port":    "22",
+				"timeout": "30s",
+			},
+		},
+	}
+
+	c, err := New(r)
+	if err != nil {
+		t.Fatalf("error creating communicator: %s", err)
+	}
+
+	tmpDir, err := ioutil.TempDir("", "communicator")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tmpDir)
+
+	content := []byte("this is the file content")
+	source := bytes.NewReader(content)
+	tmpFile := filepath.Join(tmpDir, "tempFile.out")
+	err = c.Upload(tmpFile, source)
+	if err != nil {
+		t.Fatalf("error uploading file: %s", err)
+	}
+
+	data, err := ioutil.ReadFile(tmpFile)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if !bytes.Equal(data, content) {
+		t.Fatalf("bad: %s", data)
+	}
+}
+
+func TestAccHugeUploadFile(t *testing.T) {
+	// use the local ssh server and scp binary to check uploads
+	if ok := os.Getenv("SSH_UPLOAD_TEST"); ok == "" {
+		t.Log("Skipping Upload Acceptance without SSH_UPLOAD_TEST set")
+		t.Skip()
+	}
+
+	r := &terraform.InstanceState{
+		Ephemeral: terraform.EphemeralState{
+			ConnInfo: map[string]string{
+				"type":    "ssh",
+				"user":    os.Getenv("USER"),
+				"host":    "127.0.0.1",
+				"port":    "22",
+				"timeout": "30s",
+			},
+		},
+	}
+
+	c, err := New(r)
+	if err != nil {
+		t.Fatalf("error creating communicator: %s", err)
+	}
+
+	// copy 4GB of data, random to prevent compression.
+	size := int64(1 << 32)
+	source := io.LimitReader(rand.New(rand.NewSource(0)), size)
+
+	dest, err := ioutil.TempFile("", "communicator")
+	if err != nil {
+		t.Fatal(err)
+	}
+	destName := dest.Name()
+	dest.Close()
+	defer os.Remove(destName)
+
+	t.Log("Uploading to", destName)
+
+	// bypass the Upload method so we can directly supply the file size
+	// preventing the extra copy of the huge file.
+	targetDir := filepath.Dir(destName)
+	targetFile := filepath.Base(destName)
+
+	scpFunc := func(w io.Writer, stdoutR *bufio.Reader) error {
+		return scpUploadFile(targetFile, source, w, stdoutR, size)
+	}
+
+	err = c.scpSession("scp -vt "+targetDir, scpFunc)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// check the final file size
+	fs, err := os.Stat(destName)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if fs.Size() != size {
+		t.Fatalf("expected file size of %d, got %d", size, fs.Size())
+	}
+}
+
 func TestScriptPath(t *testing.T) {
 	cases := []struct {
 		Input string
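The huge-upload test needs 4 GiB of incompressible data without staging it in memory or on disk first, so it wraps a seeded math/rand source in io.LimitReader. A small standalone sketch of the same pattern, scaled down to 1 MiB so it runs quickly:

    package main

    import (
        "fmt"
        "io"
        "io/ioutil"
        "math/rand"
    )

    func main() {
        // A seeded *rand.Rand is an io.Reader of pseudo-random bytes, and
        // LimitReader caps it at exactly size bytes.
        size := int64(1 << 20)
        source := io.LimitReader(rand.New(rand.NewSource(0)), size)

        n, err := io.Copy(ioutil.Discard, source)
        if err != nil {
            fmt.Println("copy failed:", err)
            return
        }
        fmt.Println("streamed", n, "bytes") // streamed 1048576 bytes
    }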
@@ -461,8 +461,8 @@ func (m *mux) newChannel(chanType string, direction channelDirection, extraData
 		pending:          newBuffer(),
 		extPending:       newBuffer(),
 		direction:        direction,
-		incomingRequests: make(chan *Request, 16),
-		msg:              make(chan interface{}, 16),
+		incomingRequests: make(chan *Request, chanSize),
+		msg:              make(chan interface{}, chanSize),
 		chanType:         chanType,
 		extraData:        extraData,
 		mux:              m,
@@ -135,6 +135,7 @@ const prefixLen = 5
 type streamPacketCipher struct {
 	mac    hash.Hash
 	cipher cipher.Stream
+	etm    bool
 
 	// The following members are to avoid per-packet allocations.
 	prefix      [prefixLen]byte

@@ -150,7 +151,14 @@ func (s *streamPacketCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, err
 		return nil, err
 	}
 
-	s.cipher.XORKeyStream(s.prefix[:], s.prefix[:])
+	var encryptedPaddingLength [1]byte
+	if s.mac != nil && s.etm {
+		copy(encryptedPaddingLength[:], s.prefix[4:5])
+		s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5])
+	} else {
+		s.cipher.XORKeyStream(s.prefix[:], s.prefix[:])
+	}
+
 	length := binary.BigEndian.Uint32(s.prefix[0:4])
 	paddingLength := uint32(s.prefix[4])
 

@@ -159,7 +167,12 @@ func (s *streamPacketCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, err
 		s.mac.Reset()
 		binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum)
 		s.mac.Write(s.seqNumBytes[:])
-		s.mac.Write(s.prefix[:])
+		if s.etm {
+			s.mac.Write(s.prefix[:4])
+			s.mac.Write(encryptedPaddingLength[:])
+		} else {
+			s.mac.Write(s.prefix[:])
+		}
 		macSize = uint32(s.mac.Size())
 	}
 

@@ -184,10 +197,17 @@ func (s *streamPacketCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, err
 	}
 	mac := s.packetData[length-1:]
 	data := s.packetData[:length-1]
+
+	if s.mac != nil && s.etm {
+		s.mac.Write(data)
+	}
+
 	s.cipher.XORKeyStream(data, data)
 
 	if s.mac != nil {
-		s.mac.Write(data)
+		if !s.etm {
+			s.mac.Write(data)
+		}
 		s.macResult = s.mac.Sum(s.macResult[:0])
 		if subtle.ConstantTimeCompare(s.macResult, mac) != 1 {
 			return nil, errors.New("ssh: MAC failure")

@@ -203,7 +223,13 @@ func (s *streamPacketCipher) writePacket(seqNum uint32, w io.Writer, rand io.Rea
 		return errors.New("ssh: packet too large")
 	}
 
-	paddingLength := packetSizeMultiple - (prefixLen+len(packet))%packetSizeMultiple
+	aadlen := 0
+	if s.mac != nil && s.etm {
+		// packet length is not encrypted for EtM modes
+		aadlen = 4
+	}
+
+	paddingLength := packetSizeMultiple - (prefixLen+len(packet)-aadlen)%packetSizeMultiple
 	if paddingLength < 4 {
 		paddingLength += packetSizeMultiple
 	}
 

@@ -220,15 +246,37 @@ func (s *streamPacketCipher) writePacket(seqNum uint32, w io.Writer, rand io.Rea
 		s.mac.Reset()
 		binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum)
 		s.mac.Write(s.seqNumBytes[:])
+
+		if s.etm {
+			// For EtM algorithms, the packet length must stay unencrypted,
+			// but the following data (padding length) must be encrypted
+			s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5])
+		}
+
 		s.mac.Write(s.prefix[:])
-		s.mac.Write(packet)
-		s.mac.Write(padding)
+
+		if !s.etm {
+			// For non-EtM algorithms, the algorithm is applied on unencrypted data
+			s.mac.Write(packet)
+			s.mac.Write(padding)
+		}
 	}
 
+	if !(s.mac != nil && s.etm) {
+		// For EtM algorithms, the padding length has already been encrypted
+		// and the packet length must remain unencrypted
+		s.cipher.XORKeyStream(s.prefix[:], s.prefix[:])
+	}
+
+	s.cipher.XORKeyStream(packet, packet)
+	s.cipher.XORKeyStream(padding, padding)
+
+	if s.mac != nil && s.etm {
+		// For EtM algorithms, packet and padding must be encrypted
+		s.mac.Write(packet)
+		s.mac.Write(padding)
+	}
+
-	s.cipher.XORKeyStream(s.prefix[:], s.prefix[:])
-	s.cipher.XORKeyStream(packet, packet)
-	s.cipher.XORKeyStream(padding, padding)
-
 	if _, err := w.Write(s.prefix[:]); err != nil {
 		return err
 	}
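The cipher.go hunks above add encrypt-then-MAC (EtM) handling, used by the hmac-sha2-256-etm@openssh.com mode introduced later in this diff: the packet-length field stays unencrypted, and the MAC covers that length plus the encrypted remainder instead of the plaintext. The sketch below only illustrates the ordering difference between MAC-over-plaintext and MAC-over-ciphertext with throwaway keys; it is not how the SSH transport derives keys or frames packets:

    package main

    import (
        "crypto/aes"
        "crypto/cipher"
        "crypto/hmac"
        "crypto/sha256"
        "fmt"
    )

    func main() {
        key := make([]byte, 32)           // placeholder cipher key
        iv := make([]byte, aes.BlockSize) // placeholder IV
        macKey := make([]byte, 32)        // placeholder MAC key
        payload := []byte("packet payload")

        block, err := aes.NewCipher(key)
        if err != nil {
            panic(err)
        }

        // MAC-and-encrypt (the old path): MAC the plaintext, then encrypt it.
        mac1 := hmac.New(sha256.New, macKey)
        mac1.Write(payload)
        tagOverPlaintext := mac1.Sum(nil)

        ct := make([]byte, len(payload))
        cipher.NewCTR(block, iv).XORKeyStream(ct, payload)

        // Encrypt-then-MAC (the *-etm path): encrypt first, MAC the ciphertext.
        mac2 := hmac.New(sha256.New, macKey)
        mac2.Write(ct)
        tagOverCiphertext := mac2.Sum(nil)

        fmt.Printf("tag over plaintext:  %x\n", tagOverPlaintext)
        fmt.Printf("tag over ciphertext: %x\n", tagOverCiphertext)
    }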
@@ -40,7 +40,7 @@ func (c *Client) HandleChannelOpen(channelType string) <-chan NewChannel {
 		return nil
 	}
 
-	ch = make(chan NewChannel, 16)
+	ch = make(chan NewChannel, chanSize)
 	c.channelHandlers[channelType] = ch
 	return ch
 }
@@ -56,7 +56,7 @@ var supportedHostKeyAlgos = []string{
 // This is based on RFC 4253, section 6.4, but with hmac-md5 variants removed
 // because they have reached the end of their useful life.
 var supportedMACs = []string{
-	"hmac-sha2-256", "hmac-sha1", "hmac-sha1-96",
+	"hmac-sha2-256-etm@openssh.com", "hmac-sha2-256", "hmac-sha1", "hmac-sha1-96",
 }
 
 var supportedCompressions = []string{compressionNone}

@@ -104,6 +104,21 @@ type directionAlgorithms struct {
 	Compression string
 }
 
+// rekeyBytes returns a rekeying intervals in bytes.
+func (a *directionAlgorithms) rekeyBytes() int64 {
+	// According to RFC4344 block ciphers should rekey after
+	// 2^(BLOCKSIZE/4) blocks. For all AES flavors BLOCKSIZE is
+	// 128.
+	switch a.Cipher {
+	case "aes128-ctr", "aes192-ctr", "aes256-ctr", gcmCipherID, aes128cbcID:
+		return 16 * (1 << 32)
+
+	}
+
+	// For others, stick with RFC4253 recommendation to rekey after 1 Gb of data.
+	return 1 << 30
+}
+
 type algorithms struct {
 	kex     string
 	hostKey string
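For a sense of scale, the RFC 4344 rule above works out to a much larger rekey interval than the RFC 4253 default: an AES block is 16 bytes and 2^(128/4) is 2^32 blocks, so the AES-family limit is 16 * 2^32 bytes, about 64 GiB, versus 1 GiB (1 << 30) for other ciphers. A tiny standalone check of that arithmetic:

    package main

    import "fmt"

    func main() {
        const blockSizeBits = 128                 // AES block size in bits
        const blockBytes = blockSizeBits / 8      // 16 bytes per block
        blocks := int64(1) << (blockSizeBits / 4) // 2^32 blocks per RFC 4344
        aesLimit := int64(blockBytes) * blocks    // 16 * 2^32 bytes
        defaultLimit := int64(1) << 30            // RFC 4253: rekey after ~1 GiB

        fmt.Println(aesLimit, "bytes =", aesLimit>>30, "GiB")         // 68719476736 bytes = 64 GiB
        fmt.Println(defaultLimit, "bytes =", defaultLimit>>30, "GiB") // 1073741824 bytes = 1 GiB
    }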
@@ -19,6 +19,11 @@ import (
 // messages are wrong when using ECDH.
 const debugHandshake = false
 
+// chanSize sets the amount of buffering SSH connections. This is
+// primarily for testing: setting chanSize=0 uncovers deadlocks more
+// quickly.
+const chanSize = 16
+
 // keyingTransport is a packet based transport that supports key
 // changes. It need not be thread-safe. It should pass through
 // msgNewKeys in both directions.
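chanSize replaces the hard-coded 16 used for the per-connection channels elsewhere in this diff (incoming packets, requests, NewChannel queues). The comment's point about chanSize=0 is ordinary Go channel behavior: an unbuffered send blocks until a receiver is ready, so a missing reader deadlocks immediately instead of only once a buffer fills. A minimal illustration, separate from the SSH package:

    package main

    import "fmt"

    func main() {
        // Buffered: the send completes even though nobody is receiving yet,
        // which can hide a missing reader until the buffer fills up.
        buffered := make(chan int, 16)
        buffered <- 1
        fmt.Println("buffered send returned immediately")

        // Unbuffered: the send only completes when a receiver takes the value,
        // so a forgotten reader shows up as a deadlock right away.
        unbuffered := make(chan int)
        go func() { unbuffered <- 2 }()
        fmt.Println("received", <-unbuffered)
    }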
@@ -60,7 +65,8 @@ type handshakeTransport struct {
 	pendingPackets [][]byte // Used when a key exchange is in progress.
 
 	// If the read loop wants to schedule a kex, it pings this
-	// channel, and the write loop will send out a kex message.
+	// channel, and the write loop will send out a kex
+	// message.
 	requestKex chan struct{}
 
 	// If the other side requests or confirms a kex, its kexInit

@@ -72,9 +78,14 @@ type handshakeTransport struct {
 	dialAddress string
 	remoteAddr  net.Addr
 
-	readSinceKex uint64
+	// Algorithms agreed in the last key exchange.
+	algorithms *algorithms
 
-	writtenSinceKex uint64
+	readPacketsLeft uint32
+	readBytesLeft   int64
+
+	writePacketsLeft uint32
+	writeBytesLeft   int64
 
 	// The session ID or nil if first kex did not complete yet.
 	sessionID []byte

@@ -90,12 +101,15 @@ func newHandshakeTransport(conn keyingTransport, config *Config, clientVersion,
 		conn:          conn,
 		serverVersion: serverVersion,
 		clientVersion: clientVersion,
-		incoming:      make(chan []byte, 16),
+		incoming:      make(chan []byte, chanSize),
 		requestKex:    make(chan struct{}, 1),
 		startKex:      make(chan *pendingKex, 1),
 
 		config: config,
 	}
+
+	// We always start with a mandatory key exchange.
+	t.requestKex <- struct{}{}
 	return t
 }

@@ -152,6 +166,7 @@ func (t *handshakeTransport) printPacket(p []byte, write bool) {
 	if write {
 		action = "sent"
 	}
+
 	if p[0] == msgChannelData || p[0] == msgChannelExtendedData {
 		log.Printf("%s %s data (packet %d bytes)", t.id(), action, len(p))
 	} else {

@@ -169,12 +184,6 @@ func (t *handshakeTransport) readPacket() ([]byte, error) {
 }
 
 func (t *handshakeTransport) readLoop() {
-	// We always start with the mandatory key exchange. We use
-	// the channel for simplicity, and this works if we can rely
-	// on the SSH package itself not doing anything else before
-	// waitSession has completed.
-	t.requestKeyExchange()
-
 	first := true
 	for {
 		p, err := t.readOnePacket(first)
@@ -226,10 +235,10 @@ func (t *handshakeTransport) requestKeyExchange() {
 	default:
 		// something already requested a kex, so do nothing.
 	}
-
 }
 
 func (t *handshakeTransport) kexLoop() {
+
 write:
 	for t.getWriteError() == nil {
 		var request *pendingKex

@@ -243,6 +252,7 @@ write:
 				break write
 			}
 		case <-t.requestKex:
+			break
 		}
 
 		if !sent {

@@ -266,7 +276,8 @@ write:
 
 		// We're not servicing t.startKex, but the remote end
 		// has just sent us a kexInitMsg, so it can't send
-		// another key change request.
+		// another key change request, until we close the done
+		// channel on the pendingKex request.
 
 		err := t.enterKeyExchange(request.otherInit)
 

@@ -274,7 +285,29 @@ write:
 		t.writeError = err
 		t.sentInitPacket = nil
 		t.sentInitMsg = nil
-		t.writtenSinceKex = 0
+		t.writePacketsLeft = packetRekeyThreshold
+		if t.config.RekeyThreshold > 0 {
+			t.writeBytesLeft = int64(t.config.RekeyThreshold)
+		} else if t.algorithms != nil {
+			t.writeBytesLeft = t.algorithms.w.rekeyBytes()
+		}
+
+		// we have completed the key exchange. Since the
+		// reader is still blocked, it is safe to clear out
+		// the requestKex channel. This avoids the situation
+		// where: 1) we consumed our own request for the
+		// initial kex, and 2) the kex from the remote side
+		// caused another send on the requestKex channel,
+	clear:
+		for {
+			select {
+			case <-t.requestKex:
+				//
+			default:
+				break clear
+			}
+		}
+
+		request.done <- t.writeError
 
 		// kex finished. Push packets that we received while

@@ -288,7 +321,7 @@ write:
 			break
 		}
 	}
-	t.pendingPackets = t.pendingPackets[0:]
+	t.pendingPackets = t.pendingPackets[:0]
 	t.mu.Unlock()
 }
@@ -304,17 +337,31 @@ write:
 		t.conn.Close()
 	}
 
-func (t *handshakeTransport) readOnePacket(first bool) ([]byte, error) {
-	if t.readSinceKex > t.config.RekeyThreshold {
-		t.requestKeyExchange()
-	}
-
+// The protocol uses uint32 for packet counters, so we can't let them
+// reach 1<<32. We will actually read and write more packets than
+// this, though: the other side may send more packets, and after we
+// hit this limit on writing we will send a few more packets for the
+// key exchange itself.
+const packetRekeyThreshold = (1 << 31)
+
+func (t *handshakeTransport) readOnePacket(first bool) ([]byte, error) {
 	p, err := t.conn.readPacket()
 	if err != nil {
 		return nil, err
 	}
 
-	t.readSinceKex += uint64(len(p))
+	if t.readPacketsLeft > 0 {
+		t.readPacketsLeft--
+	} else {
+		t.requestKeyExchange()
+	}
+
+	if t.readBytesLeft > 0 {
+		t.readBytesLeft -= int64(len(p))
+	} else {
+		t.requestKeyExchange()
+	}
+
 	if debugHandshake {
 		t.printPacket(p, false)
 	}

@@ -344,7 +391,12 @@ func (t *handshakeTransport) readOnePacket(first bool) ([]byte, error) {
 		return nil, err
 	}
 
-	t.readSinceKex = 0
+	t.readPacketsLeft = packetRekeyThreshold
+	if t.config.RekeyThreshold > 0 {
+		t.readBytesLeft = int64(t.config.RekeyThreshold)
+	} else {
+		t.readBytesLeft = t.algorithms.r.rekeyBytes()
+	}
 
 	// By default, a key exchange is hidden from higher layers by
 	// translating it into msgIgnore.

@@ -427,8 +479,16 @@ func (t *handshakeTransport) writePacket(p []byte) error {
 		t.pendingPackets = append(t.pendingPackets, cp)
 		return nil
 	}
-	t.writtenSinceKex += uint64(len(p))
-	if t.writtenSinceKex > t.config.RekeyThreshold {
+
+	if t.writeBytesLeft > 0 {
+		t.writeBytesLeft -= int64(len(p))
+	} else {
+		t.requestKeyExchange()
+	}
+
+	if t.writePacketsLeft > 0 {
+		t.writePacketsLeft--
+	} else {
 		t.requestKeyExchange()
 	}
 

@@ -469,7 +529,8 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error {
 		magics.serverKexInit = otherInitPacket
 	}
 
-	algs, err := findAgreedAlgorithms(clientInit, serverInit)
+	var err error
+	t.algorithms, err = findAgreedAlgorithms(clientInit, serverInit)
 	if err != nil {
 		return err
 	}

@@ -492,16 +553,16 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error {
 		}
 	}
 
-	kex, ok := kexAlgoMap[algs.kex]
+	kex, ok := kexAlgoMap[t.algorithms.kex]
 	if !ok {
-		return fmt.Errorf("ssh: unexpected key exchange algorithm %v", algs.kex)
+		return fmt.Errorf("ssh: unexpected key exchange algorithm %v", t.algorithms.kex)
 	}
 
 	var result *kexResult
 	if len(t.hostKeys) > 0 {
-		result, err = t.server(kex, algs, &magics)
+		result, err = t.server(kex, t.algorithms, &magics)
 	} else {
-		result, err = t.client(kex, algs, &magics)
+		result, err = t.client(kex, t.algorithms, &magics)
 	}
 
 	if err != nil {

@@ -513,7 +574,7 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error {
 	}
 	result.SessionID = t.sessionID
 
-	t.conn.prepareKeyChange(algs, result)
+	t.conn.prepareKeyChange(t.algorithms, result)
 	if err = t.conn.writePacket([]byte{msgNewKeys}); err != nil {
 		return err
 	}
@@ -15,6 +15,7 @@ import (
 
 type macMode struct {
 	keySize int
+	etm     bool
 	new     func(key []byte) hash.Hash
 }
 

@@ -45,13 +46,16 @@ func (t truncatingMAC) Size() int {
 func (t truncatingMAC) BlockSize() int { return t.hmac.BlockSize() }
 
 var macModes = map[string]*macMode{
-	"hmac-sha2-256": {32, func(key []byte) hash.Hash {
+	"hmac-sha2-256-etm@openssh.com": {32, true, func(key []byte) hash.Hash {
 		return hmac.New(sha256.New, key)
 	}},
-	"hmac-sha1": {20, func(key []byte) hash.Hash {
+	"hmac-sha2-256": {32, false, func(key []byte) hash.Hash {
+		return hmac.New(sha256.New, key)
+	}},
+	"hmac-sha1": {20, false, func(key []byte) hash.Hash {
 		return hmac.New(sha1.New, key)
 	}},
-	"hmac-sha1-96": {20, func(key []byte) hash.Hash {
+	"hmac-sha1-96": {20, false, func(key []byte) hash.Hash {
 		return truncatingMAC{12, hmac.New(sha1.New, key)}
 	}},
 }
@@ -116,9 +116,9 @@ func (m *mux) Wait() error {
 func newMux(p packetConn) *mux {
 	m := &mux{
 		conn:             p,
-		incomingChannels: make(chan NewChannel, 16),
+		incomingChannels: make(chan NewChannel, chanSize),
 		globalResponses:  make(chan interface{}, 1),
-		incomingRequests: make(chan *Request, 16),
+		incomingRequests: make(chan *Request, chanSize),
 		errCond:          newCond(),
 	}
 	if debugMux {
@@ -10,6 +10,7 @@ import (
 	"fmt"
 	"io"
 	"net"
+	"strings"
 )
 
 // The Permissions type holds fine-grained permissions that are

@@ -231,7 +232,7 @@ func isAcceptableAlgo(algo string) bool {
 	return false
 }
 
-func checkSourceAddress(addr net.Addr, sourceAddr string) error {
+func checkSourceAddress(addr net.Addr, sourceAddrs string) error {
 	if addr == nil {
 		return errors.New("ssh: no address known for client, but source-address match required")
 	}

@@ -241,18 +242,20 @@ func checkSourceAddress(addr net.Addr, sourceAddr string) error {
 		return fmt.Errorf("ssh: remote address %v is not an TCP address when checking source-address match", addr)
 	}
 
-	if allowedIP := net.ParseIP(sourceAddr); allowedIP != nil {
-		if allowedIP.Equal(tcpAddr.IP) {
-			return nil
-		}
-	} else {
-		_, ipNet, err := net.ParseCIDR(sourceAddr)
-		if err != nil {
-			return fmt.Errorf("ssh: error parsing source-address restriction %q: %v", sourceAddr, err)
-		}
+	for _, sourceAddr := range strings.Split(sourceAddrs, ",") {
+		if allowedIP := net.ParseIP(sourceAddr); allowedIP != nil {
+			if allowedIP.Equal(tcpAddr.IP) {
+				return nil
+			}
+		} else {
+			_, ipNet, err := net.ParseCIDR(sourceAddr)
+			if err != nil {
+				return fmt.Errorf("ssh: error parsing source-address restriction %q: %v", sourceAddr, err)
+			}
 
-		if ipNet.Contains(tcpAddr.IP) {
-			return nil
+			if ipNet.Contains(tcpAddr.IP) {
+				return nil
+			}
 		}
 	}
 
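With this change the source-address value handed to checkSourceAddress may be a comma-separated list of IPs and CIDR blocks rather than a single entry. A standalone sketch of the same matching idea, outside the package (the helper name and test values are made up for illustration, and unlike the real function it silently skips unparsable entries instead of returning an error):

    package main

    import (
        "fmt"
        "net"
        "strings"
    )

    // matchesSourceAddress mirrors the loop added to checkSourceAddress: each
    // comma-separated entry is tried first as a literal IP, then as a CIDR block.
    func matchesSourceAddress(ip net.IP, sourceAddrs string) bool {
        for _, sourceAddr := range strings.Split(sourceAddrs, ",") {
            if allowedIP := net.ParseIP(sourceAddr); allowedIP != nil {
                if allowedIP.Equal(ip) {
                    return true
                }
            } else if _, ipNet, err := net.ParseCIDR(sourceAddr); err == nil {
                if ipNet.Contains(ip) {
                    return true
                }
            }
        }
        return false
    }

    func main() {
        allowed := "10.0.0.0/8,192.168.1.5"
        fmt.Println(matchesSourceAddress(net.ParseIP("10.1.2.3"), allowed))    // true, via the CIDR entry
        fmt.Println(matchesSourceAddress(net.ParseIP("192.168.1.5"), allowed)) // true, via the literal IP
        fmt.Println(matchesSourceAddress(net.ParseIP("172.16.0.1"), allowed))  // false
    }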
@@ -8,8 +8,13 @@ import (
 	"bufio"
 	"errors"
 	"io"
+	"log"
 )
 
+// debugTransport if set, will print packet types as they go over the
+// wire. No message decoding is done, to minimize the impact on timing.
+const debugTransport = false
+
 const (
 	gcmCipherID = "aes128-gcm@openssh.com"
 	aes128cbcID = "aes128-cbc"

@@ -40,7 +45,7 @@ type transport struct {
 	bufReader *bufio.Reader
 	bufWriter *bufio.Writer
 	rand      io.Reader
-
+	isClient  bool
 	io.Closer
 }
 

@@ -86,6 +91,22 @@ func (t *transport) prepareKeyChange(algs *algorithms, kexResult *kexResult) err
 	return nil
 }
 
+func (t *transport) printPacket(p []byte, write bool) {
+	if len(p) == 0 {
+		return
+	}
+	who := "server"
+	if t.isClient {
+		who = "client"
+	}
+	what := "read"
+	if write {
+		what = "write"
+	}
+
+	log.Println(what, who, p[0])
+}
+
 // Read and decrypt next packet.
 func (t *transport) readPacket() (p []byte, err error) {
 	for {

@@ -97,6 +118,9 @@ func (t *transport) readPacket() (p []byte, err error) {
 			break
 		}
 	}
+	if debugTransport {
+		t.printPacket(p, false)
+	}
 
 	return p, err
 }

@@ -141,6 +165,9 @@ func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) {
 }
 
 func (t *transport) writePacket(packet []byte) error {
+	if debugTransport {
+		t.printPacket(packet, true)
+	}
 	return t.writer.writePacket(t.bufWriter, t.rand, packet)
 }
 

@@ -181,6 +208,8 @@ func newTransport(rwc io.ReadWriteCloser, rand io.Reader, isClient bool) *transp
 		},
 		Closer: rwc,
 	}
+	t.isClient = isClient
+
 	if isClient {
 		t.reader.dir = serverKeys
 		t.writer.dir = clientKeys

@@ -238,6 +267,7 @@ func newPacketCipher(d direction, algs directionAlgorithms, kex *kexResult) (pac
 
 	c := &streamPacketCipher{
 		mac: macModes[algs.MAC].new(macKey),
+		etm: macModes[algs.MAC].etm,
 	}
 	c.macResult = make([]byte, c.mac.Size())
|
@ -2637,10 +2637,10 @@
|
|||
"revisionTime": "2017-01-13T19:21:00Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "H+feAfAdnIKoFeRjHBw5Bnp7vvs=",
|
||||
"checksumSHA1": "fsrFs762jlaILyqqQImS1GfvIvw=",
|
||||
"path": "golang.org/x/crypto/ssh",
|
||||
"revision": "b8a2a83acfe6e6770b75de42d5ff4c67596675c0",
|
||||
"revisionTime": "2017-01-13T19:21:00Z"
|
||||
"revision": "453249f01cfeb54c3d549ddb75ff152ca243f9d8",
|
||||
"revisionTime": "2017-02-08T20:51:15Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "SJ3Ma3Ozavxpbh1usZWBCnzMKIc=",
|
||||
|
|