vendor

parent 51f99050f7, commit b4af0b91e1

19  vendor/github.com/davidlazar/go-crypto/LICENSE (generated, vendored, new file)
@@ -0,0 +1,19 @@
Copyright (c) 2016 David Lazar <lazard@mit.edu>

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
85  vendor/github.com/davidlazar/go-crypto/salsa20/salsa20.go (generated, vendored, new file)
@@ -0,0 +1,85 @@
package salsa20

import (
    "crypto/cipher"
    "encoding/binary"

    "golang.org/x/crypto/salsa20/salsa"
)

const BlockSize = 64

type salsaCipher struct {
    key     *[32]byte
    nonce   [8]byte
    x       [BlockSize]byte
    nx      int
    counter uint64
}

func New(key *[32]byte, nonce []byte) cipher.Stream {
    c := new(salsaCipher)

    if len(nonce) == 24 {
        var subKey [32]byte
        var hNonce [16]byte
        copy(hNonce[:], nonce[:16])
        salsa.HSalsa20(&subKey, &hNonce, key, &salsa.Sigma)
        copy(c.nonce[:], nonce[16:])
        c.key = &subKey
    } else if len(nonce) == 8 {
        c.key = key
        copy(c.nonce[:], nonce)
    } else {
        panic("salsa20: nonce must be 8 or 24 bytes")
    }
    return c
}

func (c *salsaCipher) XORKeyStream(dst, src []byte) {
    if len(dst) < len(src) {
        src = src[:len(dst)]
    }
    if c.nx > 0 {
        n := xorBytes(dst, src, c.x[c.nx:])
        c.nx += n
        if c.nx == BlockSize {
            c.nx = 0
        }
        src = src[n:]
        dst = dst[n:]
    }
    if len(src) > BlockSize {
        n := len(src) &^ (BlockSize - 1)
        c.blocks(dst, src[:n])
        src = src[n:]
        dst = dst[n:]
    }
    if len(src) > 0 {
        c.nx = copy(c.x[:], src)
        for i := c.nx; i < len(c.x); i++ {
            c.x[i] = 0
        }
        c.blocks(c.x[:], c.x[:])
        copy(dst, c.x[:c.nx])
    }
}

func (c *salsaCipher) blocks(dst, src []byte) {
    var nonce [16]byte
    copy(nonce[:], c.nonce[:])
    binary.LittleEndian.PutUint64(nonce[8:], c.counter)
    salsa.XORKeyStream(dst, src, &nonce, c.key)
    c.counter += uint64(len(src)) / 64
}

func xorBytes(dst, a, b []byte) int {
    n := len(a)
    if len(b) < n {
        n = len(b)
    }
    for i := 0; i < n; i++ {
        dst[i] = a[i] ^ b[i]
    }
    return n
}
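Since New returns a standard cipher.Stream, this vendored package plugs into any code written against Go's stream-cipher interface. A minimal usage sketch, not part of this commit (the key, nonce, and message values are illustrative only; a real key must be random and a key/nonce pair must never be reused):

package main

import (
    "fmt"

    "github.com/davidlazar/go-crypto/salsa20"
)

func main() {
    var key [32]byte          // illustrative only: fill from crypto/rand in real use
    nonce := make([]byte, 24) // a 24-byte nonce selects the XSalsa20 (HSalsa20 subkey) path

    msg := []byte("hello, noise")
    ct := make([]byte, len(msg))
    salsa20.New(&key, nonce).XORKeyStream(ct, msg) // encrypt

    pt := make([]byte, len(ct))
    salsa20.New(&key, nonce).XORKeyStream(pt, ct) // decrypt with a fresh stream, same key/nonce

    fmt.Println(string(pt)) // "hello, noise"
}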
1  vendor/github.com/flynn/noise/CONTRIBUTING.md (generated, vendored, new file)
@@ -0,0 +1 @@
See the [Flynn contributing guide](https://flynn.io/docs/contributing).
29  vendor/github.com/flynn/noise/LICENSE (generated, vendored, new file)
@@ -0,0 +1,29 @@
Flynn® is a trademark of Prime Directive, Inc.

Copyright (c) 2015 Prime Directive, Inc. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
     notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
     copyright notice, this list of conditions and the following disclaimer
     in the documentation and/or other materials provided with the
     distribution.
   * Neither the name of Prime Directive, Inc. nor the names of its
     contributors may be used to endorse or promote products derived from
     this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
5  vendor/github.com/flynn/noise/README.md (generated, vendored, new file)
@@ -0,0 +1,5 @@
# noise [](https://pkg.go.dev/github.com/flynn/noise) [](https://github.com/flynn/noise/actions)

This is a Go package that implements the [Noise Protocol
Framework](https://noiseprotocol.org). See [the
documentation](https://pkg.go.dev/github.com/flynn/noise) for usage information.
224  vendor/github.com/flynn/noise/cipher_suite.go (generated, vendored, new file)
@@ -0,0 +1,224 @@
package noise

import (
    "crypto/aes"
    "crypto/cipher"
    "crypto/rand"
    "crypto/sha256"
    "crypto/sha512"
    "encoding/binary"
    "hash"
    "io"

    "golang.org/x/crypto/blake2b"
    "golang.org/x/crypto/blake2s"
    "golang.org/x/crypto/chacha20poly1305"
    "golang.org/x/crypto/curve25519"
)

// A DHKey is a keypair used for Diffie-Hellman key agreement.
type DHKey struct {
    Private []byte
    Public  []byte
}

// A DHFunc implements Diffie-Hellman key agreement.
type DHFunc interface {
    // GenerateKeypair generates a new keypair using random as a source of
    // entropy.
    GenerateKeypair(random io.Reader) (DHKey, error)

    // DH performs a Diffie-Hellman calculation between the provided private and
    // public keys and returns the result.
    DH(privkey, pubkey []byte) ([]byte, error)

    // DHLen is the number of bytes returned by DH.
    DHLen() int

    // DHName is the name of the DH function.
    DHName() string
}

// A HashFunc implements a cryptographic hash function.
type HashFunc interface {
    // Hash returns a hash state.
    Hash() hash.Hash

    // HashName is the name of the hash function.
    HashName() string
}

// A CipherFunc implements an AEAD symmetric cipher.
type CipherFunc interface {
    // Cipher initializes the algorithm with the provided key and returns a Cipher.
    Cipher(k [32]byte) Cipher

    // CipherName is the name of the cipher.
    CipherName() string
}

// A Cipher is a AEAD cipher that has been initialized with a key.
type Cipher interface {
    // Encrypt encrypts the provided plaintext with a nonce and then appends the
    // ciphertext to out along with an authentication tag over the ciphertext
    // and optional authenticated data.
    Encrypt(out []byte, n uint64, ad, plaintext []byte) []byte

    // Decrypt authenticates the ciphertext and optional authenticated data and
    // then decrypts the provided ciphertext using the provided nonce and
    // appends it to out.
    Decrypt(out []byte, n uint64, ad, ciphertext []byte) ([]byte, error)
}

// A CipherSuite is a set of cryptographic primitives used in a Noise protocol.
// It should be constructed with NewCipherSuite.
type CipherSuite interface {
    DHFunc
    CipherFunc
    HashFunc
    Name() []byte
}

// NewCipherSuite returns a CipherSuite constructed from the specified
// primitives.
func NewCipherSuite(dh DHFunc, c CipherFunc, h HashFunc) CipherSuite {
    return ciphersuite{
        DHFunc:     dh,
        CipherFunc: c,
        HashFunc:   h,
        name:       []byte(dh.DHName() + "_" + c.CipherName() + "_" + h.HashName()),
    }
}

type ciphersuite struct {
    DHFunc
    CipherFunc
    HashFunc
    name []byte
}

func (s ciphersuite) Name() []byte { return s.name }

// DH25519 is the Curve25519 ECDH function.
var DH25519 DHFunc = dh25519{}

type dh25519 struct{}

func (dh25519) GenerateKeypair(rng io.Reader) (DHKey, error) {
    privkey := make([]byte, 32)
    if rng == nil {
        rng = rand.Reader
    }
    if _, err := io.ReadFull(rng, privkey); err != nil {
        return DHKey{}, err
    }
    pubkey, err := curve25519.X25519(privkey, curve25519.Basepoint)
    if err != nil {
        return DHKey{}, err
    }
    return DHKey{Private: privkey, Public: pubkey}, nil
}

func (dh25519) DH(privkey, pubkey []byte) ([]byte, error) {
    return curve25519.X25519(privkey, pubkey)
}

func (dh25519) DHLen() int     { return 32 }
func (dh25519) DHName() string { return "25519" }

type cipherFn struct {
    fn   func([32]byte) Cipher
    name string
}

func (c cipherFn) Cipher(k [32]byte) Cipher { return c.fn(k) }
func (c cipherFn) CipherName() string       { return c.name }

// CipherAESGCM is the AES256-GCM AEAD cipher.
var CipherAESGCM CipherFunc = cipherFn{cipherAESGCM, "AESGCM"}

func cipherAESGCM(k [32]byte) Cipher {
    c, err := aes.NewCipher(k[:])
    if err != nil {
        panic(err)
    }
    gcm, err := cipher.NewGCM(c)
    if err != nil {
        panic(err)
    }
    return aeadCipher{
        gcm,
        func(n uint64) []byte {
            var nonce [12]byte
            binary.BigEndian.PutUint64(nonce[4:], n)
            return nonce[:]
        },
    }
}

// CipherChaChaPoly is the ChaCha20-Poly1305 AEAD cipher construction.
var CipherChaChaPoly CipherFunc = cipherFn{cipherChaChaPoly, "ChaChaPoly"}

func cipherChaChaPoly(k [32]byte) Cipher {
    c, err := chacha20poly1305.New(k[:])
    if err != nil {
        panic(err)
    }
    return aeadCipher{
        c,
        func(n uint64) []byte {
            var nonce [12]byte
            binary.LittleEndian.PutUint64(nonce[4:], n)
            return nonce[:]
        },
    }
}

type aeadCipher struct {
    cipher.AEAD
    nonce func(uint64) []byte
}

func (c aeadCipher) Encrypt(out []byte, n uint64, ad, plaintext []byte) []byte {
    return c.Seal(out, c.nonce(n), plaintext, ad)
}

func (c aeadCipher) Decrypt(out []byte, n uint64, ad, ciphertext []byte) ([]byte, error) {
    return c.Open(out, c.nonce(n), ciphertext, ad)
}

type hashFn struct {
    fn   func() hash.Hash
    name string
}

func (h hashFn) Hash() hash.Hash  { return h.fn() }
func (h hashFn) HashName() string { return h.name }

// HashSHA256 is the SHA-256 hash function.
var HashSHA256 HashFunc = hashFn{sha256.New, "SHA256"}

// HashSHA512 is the SHA-512 hash function.
var HashSHA512 HashFunc = hashFn{sha512.New, "SHA512"}

func blake2bNew() hash.Hash {
    h, err := blake2b.New512(nil)
    if err != nil {
        panic(err)
    }
    return h
}

// HashBLAKE2b is the BLAKE2b hash function.
var HashBLAKE2b HashFunc = hashFn{blake2bNew, "BLAKE2b"}

func blake2sNew() hash.Hash {
    h, err := blake2s.New256(nil)
    if err != nil {
        panic(err)
    }
    return h
}

// HashBLAKE2s is the BLAKE2s hash function.
var HashBLAKE2s HashFunc = hashFn{blake2sNew, "BLAKE2s"}
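For reference, the primitives above are combined through NewCipherSuite, and the suite name is built from the component names (e.g. 25519_ChaChaPoly_BLAKE2s). A small sketch of assembling a suite and exercising its Cipher directly, not part of this commit (the all-zero key is illustrative only; real keys come out of a handshake):

package main

import (
    "fmt"

    "github.com/flynn/noise"
)

func main() {
    cs := noise.NewCipherSuite(noise.DH25519, noise.CipherChaChaPoly, noise.HashBLAKE2s)
    fmt.Printf("%s\n", cs.Name()) // 25519_ChaChaPoly_BLAKE2s

    // GenerateKeypair is promoted from the embedded DHFunc; nil selects crypto/rand.
    kp, err := cs.GenerateKeypair(nil)
    if err != nil {
        panic(err)
    }
    fmt.Println("public key bytes:", len(kp.Public)) // 32

    var k [32]byte // illustrative key only
    c := cs.Cipher(k)
    ct := c.Encrypt(nil, 0, nil, []byte("payload"))
    pt, err := c.Decrypt(nil, 0, nil, ct)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(pt)) // payload
}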
8  vendor/github.com/flynn/noise/go.mod (generated, vendored, new file)
@@ -0,0 +1,8 @@
module github.com/flynn/noise

go 1.16

require (
    golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2
    gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
)
15  vendor/github.com/flynn/noise/go.sum (generated, vendored, new file)
@@ -0,0 +1,15 @@
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 h1:It14KIkyBFYkHkwZ7k45minvA9aorojkyjGk9KJ5B/w=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
49  vendor/github.com/flynn/noise/hkdf.go (generated, vendored, new file)
@@ -0,0 +1,49 @@
package noise

import (
    "crypto/hmac"
    "hash"
)

func hkdf(h func() hash.Hash, outputs int, out1, out2, out3, chainingKey, inputKeyMaterial []byte) ([]byte, []byte, []byte) {
    if len(out1) > 0 {
        panic("len(out1) > 0")
    }
    if len(out2) > 0 {
        panic("len(out2) > 0")
    }
    if len(out3) > 0 {
        panic("len(out3) > 0")
    }
    if outputs > 3 {
        panic("outputs > 3")
    }

    tempMAC := hmac.New(h, chainingKey)
    tempMAC.Write(inputKeyMaterial)
    tempKey := tempMAC.Sum(out2)

    out1MAC := hmac.New(h, tempKey)
    out1MAC.Write([]byte{0x01})
    out1 = out1MAC.Sum(out1)

    if outputs == 1 {
        return out1, nil, nil
    }

    out2MAC := hmac.New(h, tempKey)
    out2MAC.Write(out1)
    out2MAC.Write([]byte{0x02})
    out2 = out2MAC.Sum(out2)

    if outputs == 2 {
        return out1, out2, nil
    }

    out3MAC := hmac.New(h, tempKey)
    out3MAC.Write(out2)
    out3MAC.Write([]byte{0x03})
    out3 = out3MAC.Sum(out3)

    return out1, out2, out3
}
141  vendor/github.com/flynn/noise/patterns.go (generated, vendored, new file)
@@ -0,0 +1,141 @@
package noise

var HandshakeNN = HandshakePattern{
    Name: "NN",
    Messages: [][]MessagePattern{
        {MessagePatternE},
        {MessagePatternE, MessagePatternDHEE},
    },
}

var HandshakeKN = HandshakePattern{
    Name:                 "KN",
    InitiatorPreMessages: []MessagePattern{MessagePatternS},
    Messages: [][]MessagePattern{
        {MessagePatternE},
        {MessagePatternE, MessagePatternDHEE, MessagePatternDHSE},
    },
}

var HandshakeNK = HandshakePattern{
    Name:                 "NK",
    ResponderPreMessages: []MessagePattern{MessagePatternS},
    Messages: [][]MessagePattern{
        {MessagePatternE, MessagePatternDHES},
        {MessagePatternE, MessagePatternDHEE},
    },
}

var HandshakeKK = HandshakePattern{
    Name:                 "KK",
    InitiatorPreMessages: []MessagePattern{MessagePatternS},
    ResponderPreMessages: []MessagePattern{MessagePatternS},
    Messages: [][]MessagePattern{
        {MessagePatternE, MessagePatternDHES, MessagePatternDHSS},
        {MessagePatternE, MessagePatternDHEE, MessagePatternDHSE},
    },
}

var HandshakeNX = HandshakePattern{
    Name: "NX",
    Messages: [][]MessagePattern{
        {MessagePatternE},
        {MessagePatternE, MessagePatternDHEE, MessagePatternS, MessagePatternDHES},
    },
}

var HandshakeKX = HandshakePattern{
    Name:                 "KX",
    InitiatorPreMessages: []MessagePattern{MessagePatternS},
    Messages: [][]MessagePattern{
        {MessagePatternE},
        {MessagePatternE, MessagePatternDHEE, MessagePatternDHSE, MessagePatternS, MessagePatternDHES},
    },
}

var HandshakeXN = HandshakePattern{
    Name: "XN",
    Messages: [][]MessagePattern{
        {MessagePatternE},
        {MessagePatternE, MessagePatternDHEE},
        {MessagePatternS, MessagePatternDHSE},
    },
}

var HandshakeIN = HandshakePattern{
    Name: "IN",
    Messages: [][]MessagePattern{
        {MessagePatternE, MessagePatternS},
        {MessagePatternE, MessagePatternDHEE, MessagePatternDHSE},
    },
}

var HandshakeXK = HandshakePattern{
    Name:                 "XK",
    ResponderPreMessages: []MessagePattern{MessagePatternS},
    Messages: [][]MessagePattern{
        {MessagePatternE, MessagePatternDHES},
        {MessagePatternE, MessagePatternDHEE},
        {MessagePatternS, MessagePatternDHSE},
    },
}

var HandshakeIK = HandshakePattern{
    Name:                 "IK",
    ResponderPreMessages: []MessagePattern{MessagePatternS},
    Messages: [][]MessagePattern{
        {MessagePatternE, MessagePatternDHES, MessagePatternS, MessagePatternDHSS},
        {MessagePatternE, MessagePatternDHEE, MessagePatternDHSE},
    },
}

var HandshakeXX = HandshakePattern{
    Name: "XX",
    Messages: [][]MessagePattern{
        {MessagePatternE},
        {MessagePatternE, MessagePatternDHEE, MessagePatternS, MessagePatternDHES},
        {MessagePatternS, MessagePatternDHSE},
    },
}

var HandshakeXXfallback = HandshakePattern{
    Name:                 "XXfallback",
    ResponderPreMessages: []MessagePattern{MessagePatternE},
    Messages: [][]MessagePattern{
        {MessagePatternE, MessagePatternDHEE, MessagePatternS, MessagePatternDHSE},
        {MessagePatternS, MessagePatternDHES},
    },
}

var HandshakeIX = HandshakePattern{
    Name: "IX",
    Messages: [][]MessagePattern{
        {MessagePatternE, MessagePatternS},
        {MessagePatternE, MessagePatternDHEE, MessagePatternDHSE, MessagePatternS, MessagePatternDHES},
    },
}

var HandshakeN = HandshakePattern{
    Name:                 "N",
    ResponderPreMessages: []MessagePattern{MessagePatternS},
    Messages: [][]MessagePattern{
        {MessagePatternE, MessagePatternDHES},
    },
}

var HandshakeK = HandshakePattern{
    Name:                 "K",
    InitiatorPreMessages: []MessagePattern{MessagePatternS},
    ResponderPreMessages: []MessagePattern{MessagePatternS},
    Messages: [][]MessagePattern{
        {MessagePatternE, MessagePatternDHES, MessagePatternDHSS},
    },
}

var HandshakeX = HandshakePattern{
    Name:                 "X",
    ResponderPreMessages: []MessagePattern{MessagePatternS},
    Messages: [][]MessagePattern{
        {MessagePatternE, MessagePatternDHES, MessagePatternS, MessagePatternDHSS},
    },
}
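Each pattern above is plain data: optional pre-messages plus the ordered message rounds that the handshake state machine walks through. A small sketch, not part of this commit, that reads that structure back out:

package main

import (
    "fmt"

    "github.com/flynn/noise"
)

func main() {
    for _, p := range []noise.HandshakePattern{noise.HandshakeNN, noise.HandshakeXX, noise.HandshakeIK} {
        // Messages holds one slice per handshake message; pre-messages are keys known in advance.
        fmt.Printf("Noise_%s: %d message(s), %d initiator pre-message(s), %d responder pre-message(s)\n",
            p.Name, len(p.Messages), len(p.InitiatorPreMessages), len(p.ResponderPreMessages))
    }
}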
600  vendor/github.com/flynn/noise/state.go (generated, vendored, new file)
@@ -0,0 +1,600 @@
// Package noise implements the Noise Protocol Framework.
//
// Noise is a low-level framework for building crypto protocols. Noise protocols
// support mutual and optional authentication, identity hiding, forward secrecy,
// zero round-trip encryption, and other advanced features. For more details,
// visit https://noiseprotocol.org.
package noise

import (
    "crypto/rand"
    "errors"
    "fmt"
    "io"
    "math"
)

// A CipherState provides symmetric encryption and decryption after a successful
// handshake.
type CipherState struct {
    cs CipherSuite
    c  Cipher
    k  [32]byte
    n  uint64

    invalid bool
}

// MaxNonce is the maximum value of n that is allowed. ErrMaxNonce is returned
// by Encrypt and Decrypt after this has been reached. 2^64-1 is reserved for rekeys.
const MaxNonce = uint64(math.MaxUint64) - 1

var ErrMaxNonce = errors.New("noise: cipherstate has reached maximum n, a new handshake must be performed")
var ErrCipherSuiteCopied = errors.New("noise: CipherSuite has been copied, state is invalid")

// Encrypt encrypts the plaintext and then appends the ciphertext and an
// authentication tag across the ciphertext and optional authenticated data to
// out. This method automatically increments the nonce after every call, so
// messages must be decrypted in the same order. ErrMaxNonce is returned after
// the maximum nonce of 2^64-2 is reached.
func (s *CipherState) Encrypt(out, ad, plaintext []byte) ([]byte, error) {
    if s.invalid {
        return nil, ErrCipherSuiteCopied
    }
    if s.n > MaxNonce {
        return nil, ErrMaxNonce
    }
    out = s.c.Encrypt(out, s.n, ad, plaintext)
    s.n++
    return out, nil
}

// Decrypt checks the authenticity of the ciphertext and authenticated data and
// then decrypts and appends the plaintext to out. This method automatically
// increments the nonce after every call, messages must be provided in the same
// order that they were encrypted with no missing messages. ErrMaxNonce is
// returned after the maximum nonce of 2^64-2 is reached.
func (s *CipherState) Decrypt(out, ad, ciphertext []byte) ([]byte, error) {
    if s.invalid {
        return nil, ErrCipherSuiteCopied
    }
    if s.n > MaxNonce {
        return nil, ErrMaxNonce
    }
    out, err := s.c.Decrypt(out, s.n, ad, ciphertext)
    if err != nil {
        return nil, err
    }
    s.n++
    return out, nil
}

// Cipher returns the low-level symmetric encryption primitive. It should only
// be used if nonces need to be managed manually, for example with a network
// protocol that can deliver out-of-order messages. This is dangerous, users
// must ensure that they are incrementing a nonce after every encrypt operation.
// After calling this method, it is an error to call Encrypt/Decrypt on the
// CipherState.
func (s *CipherState) Cipher() Cipher {
    s.invalid = true
    return s.c
}

// Nonce returns the current value of n. This can be used to determine if a
// new handshake should be performed due to approaching MaxNonce.
func (s *CipherState) Nonce() uint64 {
    return s.n
}

func (s *CipherState) Rekey() {
    var zeros [32]byte
    var out []byte
    out = s.c.Encrypt(out, math.MaxUint64, []byte{}, zeros[:])
    copy(s.k[:], out[:32])
    s.c = s.cs.Cipher(s.k)
}

type symmetricState struct {
    CipherState
    hasK bool
    ck   []byte
    h    []byte

    prevCK []byte
    prevH  []byte
}

func (s *symmetricState) InitializeSymmetric(handshakeName []byte) {
    h := s.cs.Hash()
    if len(handshakeName) <= h.Size() {
        s.h = make([]byte, h.Size())
        copy(s.h, handshakeName)
    } else {
        h.Write(handshakeName)
        s.h = h.Sum(nil)
    }
    s.ck = make([]byte, len(s.h))
    copy(s.ck, s.h)
}

func (s *symmetricState) MixKey(dhOutput []byte) {
    s.n = 0
    s.hasK = true
    var hk []byte
    s.ck, hk, _ = hkdf(s.cs.Hash, 2, s.ck[:0], s.k[:0], nil, s.ck, dhOutput)
    copy(s.k[:], hk)
    s.c = s.cs.Cipher(s.k)
}

func (s *symmetricState) MixHash(data []byte) {
    h := s.cs.Hash()
    h.Write(s.h)
    h.Write(data)
    s.h = h.Sum(s.h[:0])
}

func (s *symmetricState) MixKeyAndHash(data []byte) {
    var hk []byte
    var temp []byte
    s.ck, temp, hk = hkdf(s.cs.Hash, 3, s.ck[:0], temp, s.k[:0], s.ck, data)
    s.MixHash(temp)
    copy(s.k[:], hk)
    s.c = s.cs.Cipher(s.k)
    s.n = 0
    s.hasK = true
}

func (s *symmetricState) EncryptAndHash(out, plaintext []byte) ([]byte, error) {
    if !s.hasK {
        s.MixHash(plaintext)
        return append(out, plaintext...), nil
    }
    ciphertext, err := s.Encrypt(out, s.h, plaintext)
    if err != nil {
        return nil, err
    }
    s.MixHash(ciphertext[len(out):])
    return ciphertext, nil
}

func (s *symmetricState) DecryptAndHash(out, data []byte) ([]byte, error) {
    if !s.hasK {
        s.MixHash(data)
        return append(out, data...), nil
    }
    plaintext, err := s.Decrypt(out, s.h, data)
    if err != nil {
        return nil, err
    }
    s.MixHash(data)
    return plaintext, nil
}

func (s *symmetricState) Split() (*CipherState, *CipherState) {
    s1, s2 := &CipherState{cs: s.cs}, &CipherState{cs: s.cs}
    hk1, hk2, _ := hkdf(s.cs.Hash, 2, s1.k[:0], s2.k[:0], nil, s.ck, nil)
    copy(s1.k[:], hk1)
    copy(s2.k[:], hk2)
    s1.c = s.cs.Cipher(s1.k)
    s2.c = s.cs.Cipher(s2.k)
    return s1, s2
}

func (s *symmetricState) Checkpoint() {
    if len(s.ck) > cap(s.prevCK) {
        s.prevCK = make([]byte, len(s.ck))
    }
    s.prevCK = s.prevCK[:len(s.ck)]
    copy(s.prevCK, s.ck)

    if len(s.h) > cap(s.prevH) {
        s.prevH = make([]byte, len(s.h))
    }
    s.prevH = s.prevH[:len(s.h)]
    copy(s.prevH, s.h)
}

func (s *symmetricState) Rollback() {
    s.ck = s.ck[:len(s.prevCK)]
    copy(s.ck, s.prevCK)
    s.h = s.h[:len(s.prevH)]
    copy(s.h, s.prevH)
}

// A MessagePattern is a single message or operation used in a Noise handshake.
type MessagePattern int

// A HandshakePattern is a list of messages and operations that are used to
// perform a specific Noise handshake.
type HandshakePattern struct {
    Name                 string
    InitiatorPreMessages []MessagePattern
    ResponderPreMessages []MessagePattern
    Messages             [][]MessagePattern
}

const (
    MessagePatternS MessagePattern = iota
    MessagePatternE
    MessagePatternDHEE
    MessagePatternDHES
    MessagePatternDHSE
    MessagePatternDHSS
    MessagePatternPSK
)

// MaxMsgLen is the maximum number of bytes that can be sent in a single Noise
// message.
const MaxMsgLen = 65535

// A HandshakeState tracks the state of a Noise handshake. It may be discarded
// after the handshake is complete.
type HandshakeState struct {
    ss              symmetricState
    s               DHKey  // local static keypair
    e               DHKey  // local ephemeral keypair
    rs              []byte // remote party's static public key
    re              []byte // remote party's ephemeral public key
    psk             []byte // preshared key, maybe zero length
    messagePatterns [][]MessagePattern
    shouldWrite     bool
    initiator       bool
    msgIdx          int
    rng             io.Reader
}

// A Config provides the details necessary to process a Noise handshake. It is
// never modified by this package, and can be reused.
type Config struct {
    // CipherSuite is the set of cryptographic primitives that will be used.
    CipherSuite CipherSuite

    // Random is the source for cryptographically appropriate random bytes. If
    // zero, it is automatically configured.
    Random io.Reader

    // Pattern is the pattern for the handshake.
    Pattern HandshakePattern

    // Initiator must be true if the first message in the handshake will be sent
    // by this peer.
    Initiator bool

    // Prologue is an optional message that has already be communicated and must
    // be identical on both sides for the handshake to succeed.
    Prologue []byte

    // PresharedKey is the optional preshared key for the handshake.
    PresharedKey []byte

    // PresharedKeyPlacement specifies the placement position of the PSK token
    // when PresharedKey is specified
    PresharedKeyPlacement int

    // StaticKeypair is this peer's static keypair, required if part of the
    // handshake.
    StaticKeypair DHKey

    // EphemeralKeypair is this peer's ephemeral keypair that was provided as
    // a pre-message in the handshake.
    EphemeralKeypair DHKey

    // PeerStatic is the static public key of the remote peer that was provided
    // as a pre-message in the handshake.
    PeerStatic []byte

    // PeerEphemeral is the ephemeral public key of the remote peer that was
    // provided as a pre-message in the handshake.
    PeerEphemeral []byte
}

// NewHandshakeState starts a new handshake using the provided configuration.
func NewHandshakeState(c Config) (*HandshakeState, error) {
    hs := &HandshakeState{
        s:               c.StaticKeypair,
        e:               c.EphemeralKeypair,
        rs:              c.PeerStatic,
        psk:             c.PresharedKey,
        messagePatterns: c.Pattern.Messages,
        shouldWrite:     c.Initiator,
        initiator:       c.Initiator,
        rng:             c.Random,
    }
    if hs.rng == nil {
        hs.rng = rand.Reader
    }
    if len(c.PeerEphemeral) > 0 {
        hs.re = make([]byte, len(c.PeerEphemeral))
        copy(hs.re, c.PeerEphemeral)
    }
    hs.ss.cs = c.CipherSuite
    pskModifier := ""
    if len(hs.psk) > 0 {
        if len(hs.psk) != 32 {
            return nil, errors.New("noise: specification mandates 256-bit preshared keys")
        }
        pskModifier = fmt.Sprintf("psk%d", c.PresharedKeyPlacement)
        hs.messagePatterns = append([][]MessagePattern(nil), hs.messagePatterns...)
        if c.PresharedKeyPlacement == 0 {
            hs.messagePatterns[0] = append([]MessagePattern{MessagePatternPSK}, hs.messagePatterns[0]...)
        } else {
            hs.messagePatterns[c.PresharedKeyPlacement-1] = append(hs.messagePatterns[c.PresharedKeyPlacement-1], MessagePatternPSK)
        }
    }
    hs.ss.InitializeSymmetric([]byte("Noise_" + c.Pattern.Name + pskModifier + "_" + string(hs.ss.cs.Name())))
    hs.ss.MixHash(c.Prologue)
    for _, m := range c.Pattern.InitiatorPreMessages {
        switch {
        case c.Initiator && m == MessagePatternS:
            hs.ss.MixHash(hs.s.Public)
        case c.Initiator && m == MessagePatternE:
            hs.ss.MixHash(hs.e.Public)
        case !c.Initiator && m == MessagePatternS:
            hs.ss.MixHash(hs.rs)
        case !c.Initiator && m == MessagePatternE:
            hs.ss.MixHash(hs.re)
        }
    }
    for _, m := range c.Pattern.ResponderPreMessages {
        switch {
        case !c.Initiator && m == MessagePatternS:
            hs.ss.MixHash(hs.s.Public)
        case !c.Initiator && m == MessagePatternE:
            hs.ss.MixHash(hs.e.Public)
        case c.Initiator && m == MessagePatternS:
            hs.ss.MixHash(hs.rs)
        case c.Initiator && m == MessagePatternE:
            hs.ss.MixHash(hs.re)
        }
    }
    return hs, nil
}

// WriteMessage appends a handshake message to out. The message will include the
// optional payload if provided. If the handshake is completed by the call, two
// CipherStates will be returned, one is used for encryption of messages to the
// remote peer, the other is used for decryption of messages from the remote
// peer. It is an error to call this method out of sync with the handshake
// pattern.
func (s *HandshakeState) WriteMessage(out, payload []byte) ([]byte, *CipherState, *CipherState, error) {
    if !s.shouldWrite {
        return nil, nil, nil, errors.New("noise: unexpected call to WriteMessage should be ReadMessage")
    }
    if s.msgIdx > len(s.messagePatterns)-1 {
        return nil, nil, nil, errors.New("noise: no handshake messages left")
    }
    if len(payload) > MaxMsgLen {
        return nil, nil, nil, errors.New("noise: message is too long")
    }

    var err error
    for _, msg := range s.messagePatterns[s.msgIdx] {
        switch msg {
        case MessagePatternE:
            e, err := s.ss.cs.GenerateKeypair(s.rng)
            if err != nil {
                return nil, nil, nil, err
            }
            s.e = e
            out = append(out, s.e.Public...)
            s.ss.MixHash(s.e.Public)
            if len(s.psk) > 0 {
                s.ss.MixKey(s.e.Public)
            }
        case MessagePatternS:
            if len(s.s.Public) == 0 {
                return nil, nil, nil, errors.New("noise: invalid state, s.Public is nil")
            }
            out, err = s.ss.EncryptAndHash(out, s.s.Public)
            if err != nil {
                return nil, nil, nil, err
            }
        case MessagePatternDHEE:
            dh, err := s.ss.cs.DH(s.e.Private, s.re)
            if err != nil {
                return nil, nil, nil, err
            }
            s.ss.MixKey(dh)
        case MessagePatternDHES:
            if s.initiator {
                dh, err := s.ss.cs.DH(s.e.Private, s.rs)
                if err != nil {
                    return nil, nil, nil, err
                }
                s.ss.MixKey(dh)
            } else {
                dh, err := s.ss.cs.DH(s.s.Private, s.re)
                if err != nil {
                    return nil, nil, nil, err
                }
                s.ss.MixKey(dh)
            }
        case MessagePatternDHSE:
            if s.initiator {
                dh, err := s.ss.cs.DH(s.s.Private, s.re)
                if err != nil {
                    return nil, nil, nil, err
                }
                s.ss.MixKey(dh)
            } else {
                dh, err := s.ss.cs.DH(s.e.Private, s.rs)
                if err != nil {
                    return nil, nil, nil, err
                }
                s.ss.MixKey(dh)
            }
        case MessagePatternDHSS:
            dh, err := s.ss.cs.DH(s.s.Private, s.rs)
            if err != nil {
                return nil, nil, nil, err
            }
            s.ss.MixKey(dh)
        case MessagePatternPSK:
            s.ss.MixKeyAndHash(s.psk)
        }
    }
    s.shouldWrite = false
    s.msgIdx++
    out, err = s.ss.EncryptAndHash(out, payload)
    if err != nil {
        return nil, nil, nil, err
    }

    if s.msgIdx >= len(s.messagePatterns) {
        cs1, cs2 := s.ss.Split()
        return out, cs1, cs2, nil
    }

    return out, nil, nil, nil
}

// ErrShortMessage is returned by ReadMessage if a message is not as long as it should be.
var ErrShortMessage = errors.New("noise: message is too short")

// ReadMessage processes a received handshake message and appends the payload,
// if any to out. If the handshake is completed by the call, two CipherStates
// will be returned, one is used for encryption of messages to the remote peer,
// the other is used for decryption of messages from the remote peer. It is an
// error to call this method out of sync with the handshake pattern.
func (s *HandshakeState) ReadMessage(out, message []byte) ([]byte, *CipherState, *CipherState, error) {
    if s.shouldWrite {
        return nil, nil, nil, errors.New("noise: unexpected call to ReadMessage should be WriteMessage")
    }
    if s.msgIdx > len(s.messagePatterns)-1 {
        return nil, nil, nil, errors.New("noise: no handshake messages left")
    }

    rsSet := false
    s.ss.Checkpoint()

    var err error
    for _, msg := range s.messagePatterns[s.msgIdx] {
        switch msg {
        case MessagePatternE, MessagePatternS:
            expected := s.ss.cs.DHLen()
            if msg == MessagePatternS && s.ss.hasK {
                expected += 16
            }
            if len(message) < expected {
                return nil, nil, nil, ErrShortMessage
            }
            switch msg {
            case MessagePatternE:
                if cap(s.re) < s.ss.cs.DHLen() {
                    s.re = make([]byte, s.ss.cs.DHLen())
                }
                s.re = s.re[:s.ss.cs.DHLen()]
                copy(s.re, message)
                s.ss.MixHash(s.re)
                if len(s.psk) > 0 {
                    s.ss.MixKey(s.re)
                }
            case MessagePatternS:
                if len(s.rs) > 0 {
                    return nil, nil, nil, errors.New("noise: invalid state, rs is not nil")
                }
                s.rs, err = s.ss.DecryptAndHash(s.rs[:0], message[:expected])
                rsSet = true
            }
            if err != nil {
                s.ss.Rollback()
                if rsSet {
                    s.rs = nil
                }
                return nil, nil, nil, err
            }
            message = message[expected:]
        case MessagePatternDHEE:
            dh, err := s.ss.cs.DH(s.e.Private, s.re)
            if err != nil {
                return nil, nil, nil, err
            }
            s.ss.MixKey(dh)
        case MessagePatternDHES:
            if s.initiator {
                dh, err := s.ss.cs.DH(s.e.Private, s.rs)
                if err != nil {
                    return nil, nil, nil, err
                }
                s.ss.MixKey(dh)
            } else {
                dh, err := s.ss.cs.DH(s.s.Private, s.re)
                if err != nil {
                    return nil, nil, nil, err
                }
                s.ss.MixKey(dh)
            }
        case MessagePatternDHSE:
            if s.initiator {
                dh, err := s.ss.cs.DH(s.s.Private, s.re)
                if err != nil {
                    return nil, nil, nil, err
                }
                s.ss.MixKey(dh)
            } else {
                dh, err := s.ss.cs.DH(s.e.Private, s.rs)
                if err != nil {
                    return nil, nil, nil, err
                }
                s.ss.MixKey(dh)
            }
        case MessagePatternDHSS:
            dh, err := s.ss.cs.DH(s.s.Private, s.rs)
            if err != nil {
                return nil, nil, nil, err
            }
            s.ss.MixKey(dh)
        case MessagePatternPSK:
            s.ss.MixKeyAndHash(s.psk)
        }
    }
    out, err = s.ss.DecryptAndHash(out, message)
    if err != nil {
        s.ss.Rollback()
        if rsSet {
            s.rs = nil
        }
        return nil, nil, nil, err
    }
    s.shouldWrite = true
    s.msgIdx++

    if s.msgIdx >= len(s.messagePatterns) {
        cs1, cs2 := s.ss.Split()
        return out, cs1, cs2, nil
    }

    return out, nil, nil, nil
}

// ChannelBinding provides a value that uniquely identifies the session and can
// be used as a channel binding. It is an error to call this method before the
// handshake is complete.
func (s *HandshakeState) ChannelBinding() []byte {
    return s.ss.h
}

// PeerStatic returns the static key provided by the remote peer during
// a handshake. It is an error to call this method if a handshake message
// containing a static key has not been read.
func (s *HandshakeState) PeerStatic() []byte {
    return s.rs
}

// MessageIndex returns the current handshake message id
func (s *HandshakeState) MessageIndex() int {
    return s.msgIdx
}

// PeerEphemeral returns the ephemeral key provided by the remote peer during
// a handshake. It is an error to call this method if a handshake message
// containing a static key has not been read.
func (s *HandshakeState) PeerEphemeral() []byte {
    return s.re
}

// LocalEphemeral returns the local ephemeral key pair generated during
// a handshake.
func (s *HandshakeState) LocalEphemeral() DHKey {
    return s.e
}
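For reference, a minimal sketch of a complete Noise_NN handshake between two in-process peers using the API above. This is not part of the commit; error handling is abbreviated, and it follows the usual Noise convention that the initiator transmits with the first CipherState returned once the final handshake message is processed (both sides derive the same pair from Split):

package main

import (
    "fmt"

    "github.com/flynn/noise"
)

func main() {
    cs := noise.NewCipherSuite(noise.DH25519, noise.CipherChaChaPoly, noise.HashBLAKE2s)

    ini, err := noise.NewHandshakeState(noise.Config{CipherSuite: cs, Pattern: noise.HandshakeNN, Initiator: true})
    if err != nil {
        panic(err)
    }
    rsp, err := noise.NewHandshakeState(noise.Config{CipherSuite: cs, Pattern: noise.HandshakeNN})
    if err != nil {
        panic(err)
    }

    // -> e
    msg1, _, _, err := ini.WriteMessage(nil, nil)
    if err != nil {
        panic(err)
    }
    if _, _, _, err := rsp.ReadMessage(nil, msg1); err != nil {
        panic(err)
    }

    // <- e, ee : the handshake completes and both sides receive two CipherStates.
    msg2, rspCS1, rspCS2, err := rsp.WriteMessage(nil, nil)
    if err != nil {
        panic(err)
    }
    _, iniCS1, iniCS2, err := ini.ReadMessage(nil, msg2)
    if err != nil {
        panic(err)
    }
    _, _ = rspCS2, iniCS2 // the second pair would carry responder-to-initiator traffic

    // Transport phase: initiator encrypts with its first CipherState, responder decrypts with its first.
    ct, err := iniCS1.Encrypt(nil, nil, []byte("hello from the initiator"))
    if err != nil {
        panic(err)
    }
    pt, err := rspCS1.Decrypt(nil, nil, ct)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(pt))
}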
28640  vendor/github.com/flynn/noise/vectors.txt (generated, vendored, new file)
File diff suppressed because it is too large.
324  vendor/github.com/golang/protobuf/proto/buffer.go (generated, vendored, new file)
@@ -0,0 +1,324 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package proto

import (
    "errors"
    "fmt"

    "google.golang.org/protobuf/encoding/prototext"
    "google.golang.org/protobuf/encoding/protowire"
    "google.golang.org/protobuf/runtime/protoimpl"
)

const (
    WireVarint     = 0
    WireFixed32    = 5
    WireFixed64    = 1
    WireBytes      = 2
    WireStartGroup = 3
    WireEndGroup   = 4
)

// EncodeVarint returns the varint encoded bytes of v.
func EncodeVarint(v uint64) []byte {
    return protowire.AppendVarint(nil, v)
}

// SizeVarint returns the length of the varint encoded bytes of v.
// This is equal to len(EncodeVarint(v)).
func SizeVarint(v uint64) int {
    return protowire.SizeVarint(v)
}

// DecodeVarint parses a varint encoded integer from b,
// returning the integer value and the length of the varint.
// It returns (0, 0) if there is a parse error.
func DecodeVarint(b []byte) (uint64, int) {
    v, n := protowire.ConsumeVarint(b)
    if n < 0 {
        return 0, 0
    }
    return v, n
}

// Buffer is a buffer for encoding and decoding the protobuf wire format.
// It may be reused between invocations to reduce memory usage.
type Buffer struct {
    buf           []byte
    idx           int
    deterministic bool
}

// NewBuffer allocates a new Buffer initialized with buf,
// where the contents of buf are considered the unread portion of the buffer.
func NewBuffer(buf []byte) *Buffer {
    return &Buffer{buf: buf}
}

// SetDeterministic specifies whether to use deterministic serialization.
//
// Deterministic serialization guarantees that for a given binary, equal
// messages will always be serialized to the same bytes. This implies:
//
//   - Repeated serialization of a message will return the same bytes.
//   - Different processes of the same binary (which may be executing on
//     different machines) will serialize equal messages to the same bytes.
//
// Note that the deterministic serialization is NOT canonical across
// languages. It is not guaranteed to remain stable over time. It is unstable
// across different builds with schema changes due to unknown fields.
// Users who need canonical serialization (e.g., persistent storage in a
// canonical form, fingerprinting, etc.) should define their own
// canonicalization specification and implement their own serializer rather
// than relying on this API.
//
// If deterministic serialization is requested, map entries will be sorted
// by keys in lexographical order. This is an implementation detail and
// subject to change.
func (b *Buffer) SetDeterministic(deterministic bool) {
    b.deterministic = deterministic
}

// SetBuf sets buf as the internal buffer,
// where the contents of buf are considered the unread portion of the buffer.
func (b *Buffer) SetBuf(buf []byte) {
    b.buf = buf
    b.idx = 0
}

// Reset clears the internal buffer of all written and unread data.
func (b *Buffer) Reset() {
    b.buf = b.buf[:0]
    b.idx = 0
}

// Bytes returns the internal buffer.
func (b *Buffer) Bytes() []byte {
    return b.buf
}

// Unread returns the unread portion of the buffer.
func (b *Buffer) Unread() []byte {
    return b.buf[b.idx:]
}

// Marshal appends the wire-format encoding of m to the buffer.
func (b *Buffer) Marshal(m Message) error {
    var err error
    b.buf, err = marshalAppend(b.buf, m, b.deterministic)
    return err
}

// Unmarshal parses the wire-format message in the buffer and
// places the decoded results in m.
// It does not reset m before unmarshaling.
func (b *Buffer) Unmarshal(m Message) error {
    err := UnmarshalMerge(b.Unread(), m)
    b.idx = len(b.buf)
    return err
}

type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields }

func (m *unknownFields) String() string { panic("not implemented") }
func (m *unknownFields) Reset()         { panic("not implemented") }
func (m *unknownFields) ProtoMessage()  { panic("not implemented") }

// DebugPrint dumps the encoded bytes of b with a header and footer including s
// to stdout. This is only intended for debugging.
func (*Buffer) DebugPrint(s string, b []byte) {
    m := MessageReflect(new(unknownFields))
    m.SetUnknown(b)
    b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface())
    fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s)
}

// EncodeVarint appends an unsigned varint encoding to the buffer.
func (b *Buffer) EncodeVarint(v uint64) error {
    b.buf = protowire.AppendVarint(b.buf, v)
    return nil
}

// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer.
func (b *Buffer) EncodeZigzag32(v uint64) error {
    return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
}

// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer.
func (b *Buffer) EncodeZigzag64(v uint64) error {
    return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63))))
}

// EncodeFixed32 appends a 32-bit little-endian integer to the buffer.
func (b *Buffer) EncodeFixed32(v uint64) error {
    b.buf = protowire.AppendFixed32(b.buf, uint32(v))
    return nil
}

// EncodeFixed64 appends a 64-bit little-endian integer to the buffer.
func (b *Buffer) EncodeFixed64(v uint64) error {
    b.buf = protowire.AppendFixed64(b.buf, uint64(v))
    return nil
}

// EncodeRawBytes appends a length-prefixed raw bytes to the buffer.
func (b *Buffer) EncodeRawBytes(v []byte) error {
    b.buf = protowire.AppendBytes(b.buf, v)
    return nil
}

// EncodeStringBytes appends a length-prefixed raw bytes to the buffer.
// It does not validate whether v contains valid UTF-8.
func (b *Buffer) EncodeStringBytes(v string) error {
    b.buf = protowire.AppendString(b.buf, v)
    return nil
}

// EncodeMessage appends a length-prefixed encoded message to the buffer.
func (b *Buffer) EncodeMessage(m Message) error {
    var err error
    b.buf = protowire.AppendVarint(b.buf, uint64(Size(m)))
    b.buf, err = marshalAppend(b.buf, m, b.deterministic)
    return err
}

// DecodeVarint consumes an encoded unsigned varint from the buffer.
func (b *Buffer) DecodeVarint() (uint64, error) {
    v, n := protowire.ConsumeVarint(b.buf[b.idx:])
    if n < 0 {
        return 0, protowire.ParseError(n)
    }
    b.idx += n
    return uint64(v), nil
}

// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer.
func (b *Buffer) DecodeZigzag32() (uint64, error) {
    v, err := b.DecodeVarint()
    if err != nil {
        return 0, err
    }
    return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil
}

// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer.
func (b *Buffer) DecodeZigzag64() (uint64, error) {
    v, err := b.DecodeVarint()
    if err != nil {
        return 0, err
    }
    return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil
}

// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer.
func (b *Buffer) DecodeFixed32() (uint64, error) {
    v, n := protowire.ConsumeFixed32(b.buf[b.idx:])
    if n < 0 {
        return 0, protowire.ParseError(n)
    }
    b.idx += n
    return uint64(v), nil
}

// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer.
func (b *Buffer) DecodeFixed64() (uint64, error) {
    v, n := protowire.ConsumeFixed64(b.buf[b.idx:])
    if n < 0 {
        return 0, protowire.ParseError(n)
    }
    b.idx += n
    return uint64(v), nil
}

// DecodeRawBytes consumes a length-prefixed raw bytes from the buffer.
// If alloc is specified, it returns a copy the raw bytes
// rather than a sub-slice of the buffer.
func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) {
    v, n := protowire.ConsumeBytes(b.buf[b.idx:])
    if n < 0 {
        return nil, protowire.ParseError(n)
    }
    b.idx += n
    if alloc {
        v = append([]byte(nil), v...)
    }
    return v, nil
}

// DecodeStringBytes consumes a length-prefixed raw bytes from the buffer.
// It does not validate whether the raw bytes contain valid UTF-8.
func (b *Buffer) DecodeStringBytes() (string, error) {
    v, n := protowire.ConsumeString(b.buf[b.idx:])
    if n < 0 {
        return "", protowire.ParseError(n)
    }
    b.idx += n
    return v, nil
}

// DecodeMessage consumes a length-prefixed message from the buffer.
// It does not reset m before unmarshaling.
func (b *Buffer) DecodeMessage(m Message) error {
    v, err := b.DecodeRawBytes(false)
    if err != nil {
        return err
    }
    return UnmarshalMerge(v, m)
}

// DecodeGroup consumes a message group from the buffer.
// It assumes that the start group marker has already been consumed and
// consumes all bytes until (and including the end group marker).
// It does not reset m before unmarshaling.
func (b *Buffer) DecodeGroup(m Message) error {
    v, n, err := consumeGroup(b.buf[b.idx:])
    if err != nil {
        return err
    }
    b.idx += n
    return UnmarshalMerge(v, m)
}

// consumeGroup parses b until it finds an end group marker, returning
// the raw bytes of the message (excluding the end group marker) and the
// the total length of the message (including the end group marker).
func consumeGroup(b []byte) ([]byte, int, error) {
    b0 := b
    depth := 1 // assume this follows a start group marker
    for {
        _, wtyp, tagLen := protowire.ConsumeTag(b)
        if tagLen < 0 {
            return nil, 0, protowire.ParseError(tagLen)
        }
        b = b[tagLen:]

        var valLen int
        switch wtyp {
        case protowire.VarintType:
            _, valLen = protowire.ConsumeVarint(b)
        case protowire.Fixed32Type:
            _, valLen = protowire.ConsumeFixed32(b)
        case protowire.Fixed64Type:
            _, valLen = protowire.ConsumeFixed64(b)
        case protowire.BytesType:
            _, valLen = protowire.ConsumeBytes(b)
        case protowire.StartGroupType:
            depth++
        case protowire.EndGroupType:
            depth--
        default:
            return nil, 0, errors.New("proto: cannot parse reserved wire type")
        }
        if valLen < 0 {
            return nil, 0, protowire.ParseError(valLen)
        }
        b = b[valLen:]

        if depth == 0 {
            return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil
        }
    }
}
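For reference, a small sketch of encoding and then consuming raw wire-format primitives with this Buffer, not part of this commit (values are illustrative; decoding reads from the unread portion, which starts at index 0 on a fresh buffer):

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
)

func main() {
    b := proto.NewBuffer(nil)

    // Append an unsigned varint and a length-prefixed byte string.
    _ = b.EncodeVarint(300)
    _ = b.EncodeRawBytes([]byte("abc"))
    fmt.Printf("wire bytes: % x\n", b.Bytes()) // ac 02 03 61 62 63

    // Consume them back in the same order.
    v, err := b.DecodeVarint()
    if err != nil {
        panic(err)
    }
    s, err := b.DecodeRawBytes(true) // true copies out of the internal buffer
    if err != nil {
        panic(err)
    }
    fmt.Println(v, string(s)) // 300 abc
}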
63  vendor/github.com/golang/protobuf/proto/defaults.go (generated, vendored, new file)
@@ -0,0 +1,63 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package proto

import (
    "google.golang.org/protobuf/reflect/protoreflect"
)

// SetDefaults sets unpopulated scalar fields to their default values.
// Fields within a oneof are not set even if they have a default value.
// SetDefaults is recursively called upon any populated message fields.
func SetDefaults(m Message) {
    if m != nil {
        setDefaults(MessageReflect(m))
    }
}

func setDefaults(m protoreflect.Message) {
    fds := m.Descriptor().Fields()
    for i := 0; i < fds.Len(); i++ {
        fd := fds.Get(i)
        if !m.Has(fd) {
            if fd.HasDefault() && fd.ContainingOneof() == nil {
                v := fd.Default()
                if fd.Kind() == protoreflect.BytesKind {
                    v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes
                }
                m.Set(fd, v)
            }
            continue
        }
    }

    m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
        switch {
        // Handle singular message.
        case fd.Cardinality() != protoreflect.Repeated:
            if fd.Message() != nil {
                setDefaults(m.Get(fd).Message())
            }
        // Handle list of messages.
        case fd.IsList():
            if fd.Message() != nil {
                ls := m.Get(fd).List()
                for i := 0; i < ls.Len(); i++ {
                    setDefaults(ls.Get(i).Message())
                }
            }
        // Handle map of messages.
        case fd.IsMap():
            if fd.MapValue().Message() != nil {
                ms := m.Get(fd).Map()
                ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
                    setDefaults(v.Message())
                    return true
                })
            }
        }
        return true
    })
}
167  vendor/github.com/golang/protobuf/proto/proto.go (generated, vendored, new file)
@ -0,0 +1,167 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package proto provides functionality for handling protocol buffer messages.
|
||||
// In particular, it provides marshaling and unmarshaling between a protobuf
|
||||
// message and the binary wire format.
|
||||
//
|
||||
// See https://developers.google.com/protocol-buffers/docs/gotutorial for
|
||||
// more information.
|
||||
//
|
||||
// Deprecated: Use the "google.golang.org/protobuf/proto" package instead.
|
||||
package proto
|
||||
|
||||
import (
|
||||
protoV2 "google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
"google.golang.org/protobuf/runtime/protoiface"
|
||||
"google.golang.org/protobuf/runtime/protoimpl"
|
||||
)
|
||||
|
||||
const (
|
||||
ProtoPackageIsVersion1 = true
|
||||
ProtoPackageIsVersion2 = true
|
||||
ProtoPackageIsVersion3 = true
|
||||
ProtoPackageIsVersion4 = true
|
||||
)
|
||||
|
||||
// GeneratedEnum is any enum type generated by protoc-gen-go
|
||||
// which is a named int32 kind.
|
||||
// This type exists for documentation purposes.
|
||||
type GeneratedEnum interface{}
|
||||
|
||||
// GeneratedMessage is any message type generated by protoc-gen-go
|
||||
// which is a pointer to a named struct kind.
|
||||
// This type exists for documentation purposes.
|
||||
type GeneratedMessage interface{}
|
||||
|
||||
// Message is a protocol buffer message.
|
||||
//
|
||||
// This is the v1 version of the message interface and is marginally better
|
||||
// than an empty interface as it lacks any method to programmatically interact
|
||||
// with the contents of the message.
|
||||
//
|
||||
// A v2 message is declared in "google.golang.org/protobuf/proto".Message and
|
||||
// exposes protobuf reflection as a first-class feature of the interface.
|
||||
//
|
||||
// To convert a v1 message to a v2 message, use the MessageV2 function.
|
||||
// To convert a v2 message to a v1 message, use the MessageV1 function.
|
||||
type Message = protoiface.MessageV1
|
||||
|
||||
// MessageV1 converts either a v1 or v2 message to a v1 message.
|
||||
// It returns nil if m is nil.
|
||||
func MessageV1(m GeneratedMessage) protoiface.MessageV1 {
|
||||
return protoimpl.X.ProtoMessageV1Of(m)
|
||||
}
|
||||
|
||||
// MessageV2 converts either a v1 or v2 message to a v2 message.
|
||||
// It returns nil if m is nil.
|
||||
func MessageV2(m GeneratedMessage) protoV2.Message {
|
||||
return protoimpl.X.ProtoMessageV2Of(m)
|
||||
}
|
||||
|
||||
// MessageReflect returns a reflective view for a message.
|
||||
// It returns nil if m is nil.
|
||||
func MessageReflect(m Message) protoreflect.Message {
|
||||
return protoimpl.X.MessageOf(m)
|
||||
}
|
||||
|
||||
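An illustrative sketch of moving between the v1 and v2 message views (durationpb is used only as a convenient generated message):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	var v1 proto.Message = durationpb.New(0) // generated messages satisfy the v1 interface
	v2 := proto.MessageV2(v1)                // same message, viewed through the v2 API
	fmt.Println(proto.MessageReflect(v1).Descriptor().FullName()) // google.protobuf.Duration
	fmt.Println(proto.Equal(proto.MessageV1(v2), v1))             // true: both wrap the same data
}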
// Marshaler is implemented by messages that can marshal themselves.
|
||||
// This interface is used by the following functions: Size, Marshal,
|
||||
// Buffer.Marshal, and Buffer.EncodeMessage.
|
||||
//
|
||||
// Deprecated: Do not implement.
|
||||
type Marshaler interface {
|
||||
// Marshal formats the encoded bytes of the message.
|
||||
// It should be deterministic and emit valid protobuf wire data.
|
||||
// The caller takes ownership of the returned buffer.
|
||||
Marshal() ([]byte, error)
|
||||
}
|
||||
|
||||
// Unmarshaler is implemented by messages that can unmarshal themselves.
|
||||
// This interface is used by the following functions: Unmarshal, UnmarshalMerge,
|
||||
// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup.
|
||||
//
|
||||
// Deprecated: Do not implement.
|
||||
type Unmarshaler interface {
|
||||
// Unmarshal parses the encoded bytes of the protobuf wire input.
|
||||
// The provided buffer is only valid for the duration of the method call.
|
||||
// It should not reset the receiver message.
|
||||
Unmarshal([]byte) error
|
||||
}
|
||||
|
||||
// Merger is implemented by messages that can merge themselves.
|
||||
// This interface is used by the following functions: Clone and Merge.
|
||||
//
|
||||
// Deprecated: Do not implement.
|
||||
type Merger interface {
|
||||
// Merge merges the contents of src into the receiver message.
|
||||
// It clones all data structures in src such that it aliases no mutable
|
||||
// memory referenced by src.
|
||||
Merge(src Message)
|
||||
}
|
||||
|
||||
// RequiredNotSetError is an error type returned when
|
||||
// marshaling or unmarshaling a message with missing required fields.
|
||||
type RequiredNotSetError struct {
|
||||
err error
|
||||
}
|
||||
|
||||
func (e *RequiredNotSetError) Error() string {
|
||||
if e.err != nil {
|
||||
return e.err.Error()
|
||||
}
|
||||
return "proto: required field not set"
|
||||
}
|
||||
func (e *RequiredNotSetError) RequiredNotSet() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func checkRequiredNotSet(m protoV2.Message) error {
|
||||
if err := protoV2.CheckInitialized(m); err != nil {
|
||||
return &RequiredNotSetError{err: err}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Clone returns a deep copy of src.
|
||||
func Clone(src Message) Message {
|
||||
return MessageV1(protoV2.Clone(MessageV2(src)))
|
||||
}
|
||||
|
||||
// Merge merges src into dst, which must be messages of the same type.
|
||||
//
|
||||
// Populated scalar fields in src are copied to dst, while populated
|
||||
// singular messages in src are merged into dst by recursively calling Merge.
|
||||
// The elements of every list field in src are appended to the corresponding
|
||||
// list field in dst. The entries of every map field in src are copied into
|
||||
// the corresponding map field in dst, possibly replacing existing entries.
|
||||
// The unknown fields of src are appended to the unknown fields of dst.
|
||||
func Merge(dst, src Message) {
|
||||
protoV2.Merge(MessageV2(dst), MessageV2(src))
|
||||
}
|
||||
|
||||
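A usage sketch of these merge semantics, using the well-known Struct type purely for illustration (its fields map is a map field, so entries from src are copied into dst):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	dst, _ := structpb.NewStruct(map[string]interface{}{"a": 1}) // errors ignored for brevity
	src, _ := structpb.NewStruct(map[string]interface{}{"b": 2})
	proto.Merge(dst, src)
	// dst now holds both "a" and "b"; src is left unchanged.
	fmt.Println(proto.MarshalTextString(dst))
}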
// Equal reports whether two messages are equal.
|
||||
// If two messages marshal to the same bytes under deterministic serialization,
|
||||
// then Equal is guaranteed to report true.
|
||||
//
|
||||
// Two messages are equal if they are the same protobuf message type,
|
||||
// have the same set of populated known and extension field values,
|
||||
// and the same set of unknown fields values.
|
||||
//
|
||||
// Scalar values are compared with the equivalent of the == operator in Go,
|
||||
// except bytes values which are compared using bytes.Equal and
|
||||
// floating point values which specially treat NaNs as equal.
|
||||
// Message values are compared by recursively calling Equal.
|
||||
// Lists are equal if each element value is also equal.
|
||||
// Maps are equal if they have the same set of keys, where the pair of values
|
||||
// for each key is also equal.
|
||||
func Equal(x, y Message) bool {
|
||||
return protoV2.Equal(MessageV2(x), MessageV2(y))
|
||||
}
|
||||
|
||||
func isMessageSet(md protoreflect.MessageDescriptor) bool {
|
||||
ms, ok := md.(interface{ IsMessageSet() bool })
|
||||
return ok && ms.IsMessageSet()
|
||||
}
|
317
vendor/github.com/golang/protobuf/proto/registry.go
generated
vendored
Normal file
@ -0,0 +1,317 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"google.golang.org/protobuf/reflect/protodesc"
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
"google.golang.org/protobuf/reflect/protoregistry"
|
||||
"google.golang.org/protobuf/runtime/protoimpl"
|
||||
)
|
||||
|
||||
// filePath is the path to the proto source file.
|
||||
type filePath = string // e.g., "google/protobuf/descriptor.proto"
|
||||
|
||||
// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto.
|
||||
type fileDescGZIP = []byte
|
||||
|
||||
var fileCache sync.Map // map[filePath]fileDescGZIP
|
||||
|
||||
// RegisterFile is called from generated code to register the compressed
|
||||
// FileDescriptorProto with the file path for a proto source file.
|
||||
//
|
||||
// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead.
|
||||
func RegisterFile(s filePath, d fileDescGZIP) {
|
||||
// Decompress the descriptor.
|
||||
zr, err := gzip.NewReader(bytes.NewReader(d))
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
|
||||
}
|
||||
b, err := ioutil.ReadAll(zr)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
|
||||
}
|
||||
|
||||
// Construct a protoreflect.FileDescriptor from the raw descriptor.
|
||||
// Note that DescBuilder.Build automatically registers the constructed
|
||||
// file descriptor with the v2 registry.
|
||||
protoimpl.DescBuilder{RawDescriptor: b}.Build()
|
||||
|
||||
// Locally cache the raw descriptor form for the file.
|
||||
fileCache.Store(s, d)
|
||||
}
|
||||
|
||||
// FileDescriptor returns the compressed FileDescriptorProto given the file path
|
||||
// for a proto source file. It returns nil if not found.
|
||||
//
|
||||
// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead.
|
||||
func FileDescriptor(s filePath) fileDescGZIP {
|
||||
if v, ok := fileCache.Load(s); ok {
|
||||
return v.(fileDescGZIP)
|
||||
}
|
||||
|
||||
// Find the descriptor in the v2 registry.
|
||||
var b []byte
|
||||
if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil {
|
||||
b, _ = Marshal(protodesc.ToFileDescriptorProto(fd))
|
||||
}
|
||||
|
||||
// Locally cache the raw descriptor form for the file.
|
||||
if len(b) > 0 {
|
||||
v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b))
|
||||
return v.(fileDescGZIP)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// enumName is the name of an enum. For historical reasons, the enum name is
|
||||
// neither the full Go name nor the full protobuf name of the enum.
|
||||
// The name is the dot-separated combination of just the proto package that the
|
||||
// enum is declared within followed by the Go type name of the generated enum.
|
||||
type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum"
|
||||
|
||||
// enumsByName maps enum values by name to their numeric counterpart.
|
||||
type enumsByName = map[string]int32
|
||||
|
||||
// enumsByNumber maps enum values by number to their name counterpart.
|
||||
type enumsByNumber = map[int32]string
|
||||
|
||||
var enumCache sync.Map // map[enumName]enumsByName
|
||||
var numFilesCache sync.Map // map[protoreflect.FullName]int
|
||||
|
||||
// RegisterEnum is called from the generated code to register the mapping of
|
||||
// enum value names to enum numbers for the enum identified by s.
|
||||
//
|
||||
// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead.
|
||||
func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) {
|
||||
if _, ok := enumCache.Load(s); ok {
|
||||
panic("proto: duplicate enum registered: " + s)
|
||||
}
|
||||
enumCache.Store(s, m)
|
||||
|
||||
// This does not forward registration to the v2 registry since this API
|
||||
// lacks sufficient information to construct a complete v2 enum descriptor.
|
||||
}
|
||||
|
||||
// EnumValueMap returns the mapping from enum value names to enum numbers for
|
||||
// the enum of the given name. It returns nil if not found.
|
||||
//
|
||||
// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead.
|
||||
func EnumValueMap(s enumName) enumsByName {
|
||||
if v, ok := enumCache.Load(s); ok {
|
||||
return v.(enumsByName)
|
||||
}
|
||||
|
||||
// Check whether the cache is stale. If the number of files in the current
|
||||
// package differs, then it means that some enums may have been recently
|
||||
// registered upstream that we do not know about.
|
||||
var protoPkg protoreflect.FullName
|
||||
if i := strings.LastIndexByte(s, '.'); i >= 0 {
|
||||
protoPkg = protoreflect.FullName(s[:i])
|
||||
}
|
||||
v, _ := numFilesCache.Load(protoPkg)
|
||||
numFiles, _ := v.(int)
|
||||
if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles {
|
||||
return nil // cache is up-to-date; was not found earlier
|
||||
}
|
||||
|
||||
// Update the enum cache for all enums declared in the given proto package.
|
||||
numFiles = 0
|
||||
protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool {
|
||||
walkEnums(fd, func(ed protoreflect.EnumDescriptor) {
|
||||
name := protoimpl.X.LegacyEnumName(ed)
|
||||
if _, ok := enumCache.Load(name); !ok {
|
||||
m := make(enumsByName)
|
||||
evs := ed.Values()
|
||||
for i := evs.Len() - 1; i >= 0; i-- {
|
||||
ev := evs.Get(i)
|
||||
m[string(ev.Name())] = int32(ev.Number())
|
||||
}
|
||||
enumCache.LoadOrStore(name, m)
|
||||
}
|
||||
})
|
||||
numFiles++
|
||||
return true
|
||||
})
|
||||
numFilesCache.Store(protoPkg, numFiles)
|
||||
|
||||
// Check cache again for enum map.
|
||||
if v, ok := enumCache.Load(s); ok {
|
||||
return v.(enumsByName)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// walkEnums recursively walks all enums declared in d.
|
||||
func walkEnums(d interface {
|
||||
Enums() protoreflect.EnumDescriptors
|
||||
Messages() protoreflect.MessageDescriptors
|
||||
}, f func(protoreflect.EnumDescriptor)) {
|
||||
eds := d.Enums()
|
||||
for i := eds.Len() - 1; i >= 0; i-- {
|
||||
f(eds.Get(i))
|
||||
}
|
||||
mds := d.Messages()
|
||||
for i := mds.Len() - 1; i >= 0; i-- {
|
||||
walkEnums(mds.Get(i), f)
|
||||
}
|
||||
}
|
||||
|
||||
// messageName is the full name of a protobuf message.
|
||||
type messageName = string
|
||||
|
||||
var messageTypeCache sync.Map // map[messageName]reflect.Type
|
||||
|
||||
// RegisterType is called from generated code to register the message Go type
|
||||
// for a message of the given name.
|
||||
//
|
||||
// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead.
|
||||
func RegisterType(m Message, s messageName) {
|
||||
mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s))
|
||||
if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
messageTypeCache.Store(s, reflect.TypeOf(m))
|
||||
}
|
||||
|
||||
// RegisterMapType is called from generated code to register the Go map type
|
||||
// for a protobuf message representing a map entry.
|
||||
//
|
||||
// Deprecated: Do not use.
|
||||
func RegisterMapType(m interface{}, s messageName) {
|
||||
t := reflect.TypeOf(m)
|
||||
if t.Kind() != reflect.Map {
|
||||
panic(fmt.Sprintf("invalid map kind: %v", t))
|
||||
}
|
||||
if _, ok := messageTypeCache.Load(s); ok {
|
||||
panic(fmt.Errorf("proto: duplicate proto message registered: %s", s))
|
||||
}
|
||||
messageTypeCache.Store(s, t)
|
||||
}
|
||||
|
||||
// MessageType returns the message type for a named message.
|
||||
// It returns nil if not found.
|
||||
//
|
||||
// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead.
|
||||
func MessageType(s messageName) reflect.Type {
|
||||
if v, ok := messageTypeCache.Load(s); ok {
|
||||
return v.(reflect.Type)
|
||||
}
|
||||
|
||||
// Derive the message type from the v2 registry.
|
||||
var t reflect.Type
|
||||
if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil {
|
||||
t = messageGoType(mt)
|
||||
}
|
||||
|
||||
// If we could not get a concrete type, it is possible that it is a
|
||||
// pseudo-message for a map entry.
|
||||
if t == nil {
|
||||
d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s))
|
||||
if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() {
|
||||
kt := goTypeForField(md.Fields().ByNumber(1))
|
||||
vt := goTypeForField(md.Fields().ByNumber(2))
|
||||
t = reflect.MapOf(kt, vt)
|
||||
}
|
||||
}
|
||||
|
||||
// Locally cache the message type for the given name.
|
||||
if t != nil {
|
||||
v, _ := messageTypeCache.LoadOrStore(s, t)
|
||||
return v.(reflect.Type)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
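For illustration, assuming the durationpb package has been linked into the binary (so its descriptors are in the v2 registry), the legacy lookup resolves through that registry:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	_ "google.golang.org/protobuf/types/known/durationpb" // registers google.protobuf.Duration
)

func main() {
	t := proto.MessageType("google.protobuf.Duration")
	fmt.Println(t) // *durationpb.Duration
}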
func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type {
|
||||
switch k := fd.Kind(); k {
|
||||
case protoreflect.EnumKind:
|
||||
if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil {
|
||||
return enumGoType(et)
|
||||
}
|
||||
return reflect.TypeOf(protoreflect.EnumNumber(0))
|
||||
case protoreflect.MessageKind, protoreflect.GroupKind:
|
||||
if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil {
|
||||
return messageGoType(mt)
|
||||
}
|
||||
return reflect.TypeOf((*protoreflect.Message)(nil)).Elem()
|
||||
default:
|
||||
return reflect.TypeOf(fd.Default().Interface())
|
||||
}
|
||||
}
|
||||
|
||||
func enumGoType(et protoreflect.EnumType) reflect.Type {
|
||||
return reflect.TypeOf(et.New(0))
|
||||
}
|
||||
|
||||
func messageGoType(mt protoreflect.MessageType) reflect.Type {
|
||||
return reflect.TypeOf(MessageV1(mt.Zero().Interface()))
|
||||
}
|
||||
|
||||
// MessageName returns the full protobuf name for the given message type.
|
||||
//
|
||||
// Deprecated: Use protoreflect.MessageDescriptor.FullName instead.
|
||||
func MessageName(m Message) messageName {
|
||||
if m == nil {
|
||||
return ""
|
||||
}
|
||||
if m, ok := m.(interface{ XXX_MessageName() messageName }); ok {
|
||||
return m.XXX_MessageName()
|
||||
}
|
||||
return messageName(protoimpl.X.MessageDescriptorOf(m).FullName())
|
||||
}
|
||||
|
||||
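A small sketch of the legacy name lookup (emptypb is just an example generated message):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	fmt.Println(proto.MessageName(&emptypb.Empty{})) // google.protobuf.Empty
}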
// RegisterExtension is called from the generated code to register
|
||||
// the extension descriptor.
|
||||
//
|
||||
// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead.
|
||||
func RegisterExtension(d *ExtensionDesc) {
|
||||
if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
type extensionsByNumber = map[int32]*ExtensionDesc
|
||||
|
||||
var extensionCache sync.Map // map[messageName]extensionsByNumber
|
||||
|
||||
// RegisteredExtensions returns a map of the registered extensions for the
|
||||
// provided protobuf message, indexed by the extension field number.
|
||||
//
|
||||
// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead.
|
||||
func RegisteredExtensions(m Message) extensionsByNumber {
|
||||
// Check whether the cache is stale. If the number of extensions for
|
||||
// the given message differs, then it means that some extensions were
|
||||
// recently registered upstream that we do not know about.
|
||||
s := MessageName(m)
|
||||
v, _ := extensionCache.Load(s)
|
||||
xs, _ := v.(extensionsByNumber)
|
||||
if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) {
|
||||
return xs // cache is up-to-date
|
||||
}
|
||||
|
||||
// Cache is stale, re-compute the extensions map.
|
||||
xs = make(extensionsByNumber)
|
||||
protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool {
|
||||
if xd, ok := xt.(*ExtensionDesc); ok {
|
||||
xs[int32(xt.TypeDescriptor().Number())] = xd
|
||||
} else {
|
||||
// TODO: This implies that the protoreflect.ExtensionType is a
|
||||
// custom type not generated by protoc-gen-go. We could try and
|
||||
// convert the type to an ExtensionDesc.
|
||||
}
|
||||
return true
|
||||
})
|
||||
extensionCache.Store(s, xs)
|
||||
return xs
|
||||
}
|
801
vendor/github.com/golang/protobuf/proto/text_decode.go
generated
vendored
Normal file
@ -0,0 +1,801 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
|
||||
"google.golang.org/protobuf/encoding/prototext"
|
||||
protoV2 "google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
"google.golang.org/protobuf/reflect/protoregistry"
|
||||
)
|
||||
|
||||
const wrapTextUnmarshalV2 = false
|
||||
|
||||
// ParseError is returned by UnmarshalText.
|
||||
type ParseError struct {
|
||||
Message string
|
||||
|
||||
// Deprecated: Do not use.
|
||||
Line, Offset int
|
||||
}
|
||||
|
||||
func (e *ParseError) Error() string {
|
||||
if wrapTextUnmarshalV2 {
|
||||
return e.Message
|
||||
}
|
||||
if e.Line == 1 {
|
||||
return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message)
|
||||
}
|
||||
return fmt.Sprintf("line %d: %v", e.Line, e.Message)
|
||||
}
|
||||
|
||||
// UnmarshalText parses a proto text formatted string into m.
|
||||
func UnmarshalText(s string, m Message) error {
|
||||
if u, ok := m.(encoding.TextUnmarshaler); ok {
|
||||
return u.UnmarshalText([]byte(s))
|
||||
}
|
||||
|
||||
m.Reset()
|
||||
mi := MessageV2(m)
|
||||
|
||||
if wrapTextUnmarshalV2 {
|
||||
err := prototext.UnmarshalOptions{
|
||||
AllowPartial: true,
|
||||
}.Unmarshal([]byte(s), mi)
|
||||
if err != nil {
|
||||
return &ParseError{Message: err.Error()}
|
||||
}
|
||||
return checkRequiredNotSet(mi)
|
||||
} else {
|
||||
if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil {
|
||||
return err
|
||||
}
|
||||
return checkRequiredNotSet(mi)
|
||||
}
|
||||
}
|
||||
|
||||
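A usage sketch for the text parser (wrapperspb.StringValue is only a convenient message to parse into):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	msg := &wrapperspb.StringValue{}
	if err := proto.UnmarshalText(`value: "hello"`, msg); err != nil {
		panic(err)
	}
	fmt.Println(msg.GetValue()) // hello
}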
type textParser struct {
|
||||
s string // remaining input
|
||||
done bool // whether the parsing is finished (success or error)
|
||||
backed bool // whether back() was called
|
||||
offset, line int
|
||||
cur token
|
||||
}
|
||||
|
||||
type token struct {
|
||||
value string
|
||||
err *ParseError
|
||||
line int // line number
|
||||
offset int // byte number from start of input, not start of line
|
||||
unquoted string // the unquoted version of value, if it was a quoted string
|
||||
}
|
||||
|
||||
func newTextParser(s string) *textParser {
|
||||
p := new(textParser)
|
||||
p.s = s
|
||||
p.line = 1
|
||||
p.cur.line = 1
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) {
|
||||
md := m.Descriptor()
|
||||
fds := md.Fields()
|
||||
|
||||
// A struct is a sequence of "name: value", terminated by one of
|
||||
// '>' or '}', or the end of the input. A name may also be
|
||||
// "[extension]" or "[type/url]".
|
||||
//
|
||||
// The whole struct can also be an expanded Any message, like:
|
||||
// [type/url] < ... struct contents ... >
|
||||
seen := make(map[protoreflect.FieldNumber]bool)
|
||||
for {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value == terminator {
|
||||
break
|
||||
}
|
||||
if tok.value == "[" {
|
||||
if err := p.unmarshalExtensionOrAny(m, seen); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// This is a normal, non-extension field.
|
||||
name := protoreflect.Name(tok.value)
|
||||
fd := fds.ByName(name)
|
||||
switch {
|
||||
case fd == nil:
|
||||
gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name))))
|
||||
if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name {
|
||||
fd = gd
|
||||
}
|
||||
case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name:
|
||||
fd = nil
|
||||
case fd.IsWeak() && fd.Message().IsPlaceholder():
|
||||
fd = nil
|
||||
}
|
||||
if fd == nil {
|
||||
typeName := string(md.FullName())
|
||||
if m, ok := m.Interface().(Message); ok {
|
||||
t := reflect.TypeOf(m)
|
||||
if t.Kind() == reflect.Ptr {
|
||||
typeName = t.Elem().String()
|
||||
}
|
||||
}
|
||||
return p.errorf("unknown field name %q in %v", name, typeName)
|
||||
}
|
||||
if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil {
|
||||
return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name())
|
||||
}
|
||||
if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] {
|
||||
return p.errorf("non-repeated field %q was repeated", fd.Name())
|
||||
}
|
||||
seen[fd.Number()] = true
|
||||
|
||||
// Consume any colon.
|
||||
if err := p.checkForColon(fd); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Parse into the field.
|
||||
v := m.Get(fd)
|
||||
if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
|
||||
v = m.Mutable(fd)
|
||||
}
|
||||
if v, err = p.unmarshalValue(v, fd); err != nil {
|
||||
return err
|
||||
}
|
||||
m.Set(fd, v)
|
||||
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error {
|
||||
name, err := p.consumeExtensionOrAnyName()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If it contains a slash, it's an Any type URL.
|
||||
if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
// consume an optional colon
|
||||
if tok.value == ":" {
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
}
|
||||
|
||||
var terminator string
|
||||
switch tok.value {
|
||||
case "<":
|
||||
terminator = ">"
|
||||
case "{":
|
||||
terminator = "}"
|
||||
default:
|
||||
return p.errorf("expected '{' or '<', found %q", tok.value)
|
||||
}
|
||||
|
||||
mt, err := protoregistry.GlobalTypes.FindMessageByURL(name)
|
||||
if err != nil {
|
||||
return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):])
|
||||
}
|
||||
m2 := mt.New()
|
||||
if err := p.unmarshalMessage(m2, terminator); err != nil {
|
||||
return err
|
||||
}
|
||||
b, err := protoV2.Marshal(m2.Interface())
|
||||
if err != nil {
|
||||
return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err)
|
||||
}
|
||||
|
||||
urlFD := m.Descriptor().Fields().ByName("type_url")
|
||||
valFD := m.Descriptor().Fields().ByName("value")
|
||||
if seen[urlFD.Number()] {
|
||||
return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name())
|
||||
}
|
||||
if seen[valFD.Number()] {
|
||||
return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name())
|
||||
}
|
||||
m.Set(urlFD, protoreflect.ValueOfString(name))
|
||||
m.Set(valFD, protoreflect.ValueOfBytes(b))
|
||||
seen[urlFD.Number()] = true
|
||||
seen[valFD.Number()] = true
|
||||
return nil
|
||||
}
|
||||
|
||||
xname := protoreflect.FullName(name)
|
||||
xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname)
|
||||
if xt == nil && isMessageSet(m.Descriptor()) {
|
||||
xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension"))
|
||||
}
|
||||
if xt == nil {
|
||||
return p.errorf("unrecognized extension %q", name)
|
||||
}
|
||||
fd := xt.TypeDescriptor()
|
||||
if fd.ContainingMessage().FullName() != m.Descriptor().FullName() {
|
||||
return p.errorf("extension field %q does not extend message %q", name, m.Descriptor().FullName())
|
||||
}
|
||||
|
||||
if err := p.checkForColon(fd); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
v := m.Get(fd)
|
||||
if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
|
||||
v = m.Mutable(fd)
|
||||
}
|
||||
v, err = p.unmarshalValue(v, fd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m.Set(fd, v)
|
||||
return p.consumeOptionalSeparator()
|
||||
}
|
||||
|
||||
func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return v, tok.err
|
||||
}
|
||||
if tok.value == "" {
|
||||
return v, p.errorf("unexpected EOF")
|
||||
}
|
||||
|
||||
switch {
|
||||
case fd.IsList():
|
||||
lv := v.List()
|
||||
var err error
|
||||
if tok.value == "[" {
|
||||
// Repeated field with list notation, like [1,2,3].
|
||||
for {
|
||||
vv := lv.NewElement()
|
||||
vv, err = p.unmarshalSingularValue(vv, fd)
|
||||
if err != nil {
|
||||
return v, err
|
||||
}
|
||||
lv.Append(vv)
|
||||
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return v, tok.err
|
||||
}
|
||||
if tok.value == "]" {
|
||||
break
|
||||
}
|
||||
if tok.value != "," {
|
||||
return v, p.errorf("Expected ']' or ',' found %q", tok.value)
|
||||
}
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// One value of the repeated field.
|
||||
p.back()
|
||||
vv := lv.NewElement()
|
||||
vv, err = p.unmarshalSingularValue(vv, fd)
|
||||
if err != nil {
|
||||
return v, err
|
||||
}
|
||||
lv.Append(vv)
|
||||
return v, nil
|
||||
case fd.IsMap():
|
||||
// The map entry should be this sequence of tokens:
|
||||
// < key : KEY value : VALUE >
|
||||
// However, implementations may omit key or value, and technically
|
||||
// we should support them in any order.
|
||||
var terminator string
|
||||
switch tok.value {
|
||||
case "<":
|
||||
terminator = ">"
|
||||
case "{":
|
||||
terminator = "}"
|
||||
default:
|
||||
return v, p.errorf("expected '{' or '<', found %q", tok.value)
|
||||
}
|
||||
|
||||
keyFD := fd.MapKey()
|
||||
valFD := fd.MapValue()
|
||||
|
||||
mv := v.Map()
|
||||
kv := keyFD.Default()
|
||||
vv := mv.NewValue()
|
||||
for {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return v, tok.err
|
||||
}
|
||||
if tok.value == terminator {
|
||||
break
|
||||
}
|
||||
var err error
|
||||
switch tok.value {
|
||||
case "key":
|
||||
if err := p.consumeToken(":"); err != nil {
|
||||
return v, err
|
||||
}
|
||||
if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil {
|
||||
return v, err
|
||||
}
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return v, err
|
||||
}
|
||||
case "value":
|
||||
if err := p.checkForColon(valFD); err != nil {
|
||||
return v, err
|
||||
}
|
||||
if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil {
|
||||
return v, err
|
||||
}
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return v, err
|
||||
}
|
||||
default:
|
||||
p.back()
|
||||
return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
|
||||
}
|
||||
}
|
||||
mv.Set(kv.MapKey(), vv)
|
||||
return v, nil
|
||||
default:
|
||||
p.back()
|
||||
return p.unmarshalSingularValue(v, fd)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return v, tok.err
|
||||
}
|
||||
if tok.value == "" {
|
||||
return v, p.errorf("unexpected EOF")
|
||||
}
|
||||
|
||||
switch fd.Kind() {
|
||||
case protoreflect.BoolKind:
|
||||
switch tok.value {
|
||||
case "true", "1", "t", "True":
|
||||
return protoreflect.ValueOfBool(true), nil
|
||||
case "false", "0", "f", "False":
|
||||
return protoreflect.ValueOfBool(false), nil
|
||||
}
|
||||
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
|
||||
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
|
||||
return protoreflect.ValueOfInt32(int32(x)), nil
|
||||
}
|
||||
|
||||
// The C++ parser accepts large positive hex numbers that use
|
||||
// two's complement arithmetic to represent negative numbers.
|
||||
// This feature is here for backwards compatibility with C++.
|
||||
if strings.HasPrefix(tok.value, "0x") {
|
||||
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
|
||||
return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil
|
||||
}
|
||||
}
|
||||
case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
|
||||
if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
|
||||
return protoreflect.ValueOfInt64(int64(x)), nil
|
||||
}
|
||||
|
||||
// The C++ parser accepts large positive hex numbers that use
|
||||
// two's complement arithmetic to represent negative numbers.
|
||||
// This feature is here for backwards compatibility with C++.
|
||||
if strings.HasPrefix(tok.value, "0x") {
|
||||
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
|
||||
return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil
|
||||
}
|
||||
}
|
||||
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
|
||||
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
|
||||
return protoreflect.ValueOfUint32(uint32(x)), nil
|
||||
}
|
||||
case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
|
||||
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
|
||||
return protoreflect.ValueOfUint64(uint64(x)), nil
|
||||
}
|
||||
case protoreflect.FloatKind:
|
||||
// Ignore 'f' for compatibility with output generated by C++,
|
||||
// but don't remove 'f' when the value is "-inf" or "inf".
|
||||
v := tok.value
|
||||
if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
|
||||
v = v[:len(v)-len("f")]
|
||||
}
|
||||
if x, err := strconv.ParseFloat(v, 32); err == nil {
|
||||
return protoreflect.ValueOfFloat32(float32(x)), nil
|
||||
}
|
||||
case protoreflect.DoubleKind:
|
||||
// Ignore 'f' for compatibility with output generated by C++,
|
||||
// but don't remove 'f' when the value is "-inf" or "inf".
|
||||
v := tok.value
|
||||
if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
|
||||
v = v[:len(v)-len("f")]
|
||||
}
|
||||
if x, err := strconv.ParseFloat(v, 64); err == nil {
|
||||
return protoreflect.ValueOfFloat64(float64(x)), nil
|
||||
}
|
||||
case protoreflect.StringKind:
|
||||
if isQuote(tok.value[0]) {
|
||||
return protoreflect.ValueOfString(tok.unquoted), nil
|
||||
}
|
||||
case protoreflect.BytesKind:
|
||||
if isQuote(tok.value[0]) {
|
||||
return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil
|
||||
}
|
||||
case protoreflect.EnumKind:
|
||||
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
|
||||
return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil
|
||||
}
|
||||
vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value))
|
||||
if vd != nil {
|
||||
return protoreflect.ValueOfEnum(vd.Number()), nil
|
||||
}
|
||||
case protoreflect.MessageKind, protoreflect.GroupKind:
|
||||
var terminator string
|
||||
switch tok.value {
|
||||
case "{":
|
||||
terminator = "}"
|
||||
case "<":
|
||||
terminator = ">"
|
||||
default:
|
||||
return v, p.errorf("expected '{' or '<', found %q", tok.value)
|
||||
}
|
||||
err := p.unmarshalMessage(v.Message(), terminator)
|
||||
return v, err
|
||||
default:
|
||||
panic(fmt.Sprintf("invalid kind %v", fd.Kind()))
|
||||
}
|
||||
return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value)
|
||||
}
|
||||
|
||||
// Consume a ':' from the input stream (if the next token is a colon),
|
||||
// returning an error if a colon is needed but not present.
|
||||
func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != ":" {
|
||||
if fd.Message() == nil {
|
||||
return p.errorf("expected ':', found %q", tok.value)
|
||||
}
|
||||
p.back()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// consumeExtensionOrAnyName consumes an extension name or an Any type URL and
|
||||
// the following ']'. It returns the name or URL consumed.
|
||||
func (p *textParser) consumeExtensionOrAnyName() (string, error) {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return "", tok.err
|
||||
}
|
||||
|
||||
// If the extension name or type URL is quoted, it's a single token.
|
||||
if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
|
||||
name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return name, p.consumeToken("]")
|
||||
}
|
||||
|
||||
// Consume everything up to "]"
|
||||
var parts []string
|
||||
for tok.value != "]" {
|
||||
parts = append(parts, tok.value)
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
|
||||
}
|
||||
if p.done && tok.value != "]" {
|
||||
return "", p.errorf("unclosed type_url or extension name")
|
||||
}
|
||||
}
|
||||
return strings.Join(parts, ""), nil
|
||||
}
|
||||
|
||||
// consumeOptionalSeparator consumes an optional semicolon or comma.
|
||||
// It is used in unmarshalMessage to provide backward compatibility.
|
||||
func (p *textParser) consumeOptionalSeparator() error {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != ";" && tok.value != "," {
|
||||
p.back()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
|
||||
pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
|
||||
p.cur.err = pe
|
||||
p.done = true
|
||||
return pe
|
||||
}
|
||||
|
||||
func (p *textParser) skipWhitespace() {
|
||||
i := 0
|
||||
for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
|
||||
if p.s[i] == '#' {
|
||||
// comment; skip to end of line or input
|
||||
for i < len(p.s) && p.s[i] != '\n' {
|
||||
i++
|
||||
}
|
||||
if i == len(p.s) {
|
||||
break
|
||||
}
|
||||
}
|
||||
if p.s[i] == '\n' {
|
||||
p.line++
|
||||
}
|
||||
i++
|
||||
}
|
||||
p.offset += i
|
||||
p.s = p.s[i:len(p.s)]
|
||||
if len(p.s) == 0 {
|
||||
p.done = true
|
||||
}
|
||||
}
|
||||
|
||||
func (p *textParser) advance() {
|
||||
// Skip whitespace
|
||||
p.skipWhitespace()
|
||||
if p.done {
|
||||
return
|
||||
}
|
||||
|
||||
// Start of non-whitespace
|
||||
p.cur.err = nil
|
||||
p.cur.offset, p.cur.line = p.offset, p.line
|
||||
p.cur.unquoted = ""
|
||||
switch p.s[0] {
|
||||
case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
|
||||
// Single symbol
|
||||
p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
|
||||
case '"', '\'':
|
||||
// Quoted string
|
||||
i := 1
|
||||
for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
|
||||
if p.s[i] == '\\' && i+1 < len(p.s) {
|
||||
// skip escaped char
|
||||
i++
|
||||
}
|
||||
i++
|
||||
}
|
||||
if i >= len(p.s) || p.s[i] != p.s[0] {
|
||||
p.errorf("unmatched quote")
|
||||
return
|
||||
}
|
||||
unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
|
||||
if err != nil {
|
||||
p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
|
||||
return
|
||||
}
|
||||
p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
|
||||
p.cur.unquoted = unq
|
||||
default:
|
||||
i := 0
|
||||
for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
|
||||
i++
|
||||
}
|
||||
if i == 0 {
|
||||
p.errorf("unexpected byte %#x", p.s[0])
|
||||
return
|
||||
}
|
||||
p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
|
||||
}
|
||||
p.offset += len(p.cur.value)
|
||||
}
|
||||
|
||||
// Back off the parser by one token. Can only be done between calls to next().
|
||||
// It makes the next advance() a no-op.
|
||||
func (p *textParser) back() { p.backed = true }
|
||||
|
||||
// Advances the parser and returns the new current token.
|
||||
func (p *textParser) next() *token {
|
||||
if p.backed || p.done {
|
||||
p.backed = false
|
||||
return &p.cur
|
||||
}
|
||||
p.advance()
|
||||
if p.done {
|
||||
p.cur.value = ""
|
||||
} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
|
||||
// Look for multiple quoted strings separated by whitespace,
|
||||
// and concatenate them.
|
||||
cat := p.cur
|
||||
for {
|
||||
p.skipWhitespace()
|
||||
if p.done || !isQuote(p.s[0]) {
|
||||
break
|
||||
}
|
||||
p.advance()
|
||||
if p.cur.err != nil {
|
||||
return &p.cur
|
||||
}
|
||||
cat.value += " " + p.cur.value
|
||||
cat.unquoted += p.cur.unquoted
|
||||
}
|
||||
p.done = false // parser may have seen EOF, but we want to return cat
|
||||
p.cur = cat
|
||||
}
|
||||
return &p.cur
|
||||
}
|
||||
|
||||
func (p *textParser) consumeToken(s string) error {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != s {
|
||||
p.back()
|
||||
return p.errorf("expected %q, found %q", s, tok.value)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var errBadUTF8 = errors.New("proto: bad UTF-8")
|
||||
|
||||
func unquoteC(s string, quote rune) (string, error) {
|
||||
// This is based on C++'s tokenizer.cc.
|
||||
// Despite its name, this is *not* parsing C syntax.
|
||||
// For instance, "\0" is an invalid quoted string.
|
||||
|
||||
// Avoid allocation in trivial cases.
|
||||
simple := true
|
||||
for _, r := range s {
|
||||
if r == '\\' || r == quote {
|
||||
simple = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if simple {
|
||||
return s, nil
|
||||
}
|
||||
|
||||
buf := make([]byte, 0, 3*len(s)/2)
|
||||
for len(s) > 0 {
|
||||
r, n := utf8.DecodeRuneInString(s)
|
||||
if r == utf8.RuneError && n == 1 {
|
||||
return "", errBadUTF8
|
||||
}
|
||||
s = s[n:]
|
||||
if r != '\\' {
|
||||
if r < utf8.RuneSelf {
|
||||
buf = append(buf, byte(r))
|
||||
} else {
|
||||
buf = append(buf, string(r)...)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
ch, tail, err := unescape(s)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
buf = append(buf, ch...)
|
||||
s = tail
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
func unescape(s string) (ch string, tail string, err error) {
|
||||
r, n := utf8.DecodeRuneInString(s)
|
||||
if r == utf8.RuneError && n == 1 {
|
||||
return "", "", errBadUTF8
|
||||
}
|
||||
s = s[n:]
|
||||
switch r {
|
||||
case 'a':
|
||||
return "\a", s, nil
|
||||
case 'b':
|
||||
return "\b", s, nil
|
||||
case 'f':
|
||||
return "\f", s, nil
|
||||
case 'n':
|
||||
return "\n", s, nil
|
||||
case 'r':
|
||||
return "\r", s, nil
|
||||
case 't':
|
||||
return "\t", s, nil
|
||||
case 'v':
|
||||
return "\v", s, nil
|
||||
case '?':
|
||||
return "?", s, nil // trigraph workaround
|
||||
case '\'', '"', '\\':
|
||||
return string(r), s, nil
|
||||
case '0', '1', '2', '3', '4', '5', '6', '7':
|
||||
if len(s) < 2 {
|
||||
return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
|
||||
}
|
||||
ss := string(r) + s[:2]
|
||||
s = s[2:]
|
||||
i, err := strconv.ParseUint(ss, 8, 8)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
|
||||
}
|
||||
return string([]byte{byte(i)}), s, nil
|
||||
case 'x', 'X', 'u', 'U':
|
||||
var n int
|
||||
switch r {
|
||||
case 'x', 'X':
|
||||
n = 2
|
||||
case 'u':
|
||||
n = 4
|
||||
case 'U':
|
||||
n = 8
|
||||
}
|
||||
if len(s) < n {
|
||||
return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
|
||||
}
|
||||
ss := s[:n]
|
||||
s = s[n:]
|
||||
i, err := strconv.ParseUint(ss, 16, 64)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
|
||||
}
|
||||
if r == 'x' || r == 'X' {
|
||||
return string([]byte{byte(i)}), s, nil
|
||||
}
|
||||
if i > utf8.MaxRune {
|
||||
return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
|
||||
}
|
||||
return string(rune(i)), s, nil
|
||||
}
|
||||
return "", "", fmt.Errorf(`unknown escape \%c`, r)
|
||||
}
|
||||
|
||||
func isIdentOrNumberChar(c byte) bool {
|
||||
switch {
|
||||
case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
|
||||
return true
|
||||
case '0' <= c && c <= '9':
|
||||
return true
|
||||
}
|
||||
switch c {
|
||||
case '-', '+', '.', '_':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isWhitespace(c byte) bool {
|
||||
switch c {
|
||||
case ' ', '\t', '\n', '\r':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isQuote(c byte) bool {
|
||||
switch c {
|
||||
case '"', '\'':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
560
vendor/github.com/golang/protobuf/proto/text_encode.go
generated
vendored
Normal file
@ -0,0 +1,560 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"google.golang.org/protobuf/encoding/prototext"
|
||||
"google.golang.org/protobuf/encoding/protowire"
|
||||
"google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/reflect/protoreflect"
|
||||
"google.golang.org/protobuf/reflect/protoregistry"
|
||||
)
|
||||
|
||||
const wrapTextMarshalV2 = false
|
||||
|
||||
// TextMarshaler is a configurable text format marshaler.
|
||||
type TextMarshaler struct {
|
||||
Compact bool // use compact text format (one line)
|
||||
ExpandAny bool // expand google.protobuf.Any messages of known types
|
||||
}
|
||||
|
||||
// Marshal writes the proto text format of m to w.
|
||||
func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error {
|
||||
b, err := tm.marshal(m)
|
||||
if len(b) > 0 {
|
||||
if _, err := w.Write(b); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Text returns a proto text formatted string of m.
|
||||
func (tm *TextMarshaler) Text(m Message) string {
|
||||
b, _ := tm.marshal(m)
|
||||
return string(b)
|
||||
}
|
||||
|
||||
func (tm *TextMarshaler) marshal(m Message) ([]byte, error) {
|
||||
mr := MessageReflect(m)
|
||||
if mr == nil || !mr.IsValid() {
|
||||
return []byte("<nil>"), nil
|
||||
}
|
||||
|
||||
if wrapTextMarshalV2 {
|
||||
if m, ok := m.(encoding.TextMarshaler); ok {
|
||||
return m.MarshalText()
|
||||
}
|
||||
|
||||
opts := prototext.MarshalOptions{
|
||||
AllowPartial: true,
|
||||
EmitUnknown: true,
|
||||
}
|
||||
if !tm.Compact {
|
||||
opts.Indent = " "
|
||||
}
|
||||
if !tm.ExpandAny {
|
||||
opts.Resolver = (*protoregistry.Types)(nil)
|
||||
}
|
||||
return opts.Marshal(mr.Interface())
|
||||
} else {
|
||||
w := &textWriter{
|
||||
compact: tm.Compact,
|
||||
expandAny: tm.ExpandAny,
|
||||
complete: true,
|
||||
}
|
||||
|
||||
if m, ok := m.(encoding.TextMarshaler); ok {
|
||||
b, err := m.MarshalText()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
w.Write(b)
|
||||
return w.buf, nil
|
||||
}
|
||||
|
||||
err := w.writeMessage(mr)
|
||||
return w.buf, err
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
defaultTextMarshaler = TextMarshaler{}
|
||||
compactTextMarshaler = TextMarshaler{Compact: true}
|
||||
)
|
||||
|
||||
// MarshalText writes the proto text format of m to w.
|
||||
func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) }
|
||||
|
||||
// MarshalTextString returns a proto text formatted string of m.
|
||||
func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) }
|
||||
|
||||
// CompactText writes the compact proto text format of m to w.
|
||||
func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) }
|
||||
|
||||
// CompactTextString returns a compact proto text formatted string of m.
|
||||
func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) }
|
||||
|
||||
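A sketch of the two output modes (wrapperspb is used only as an example message; the exact spacing shown in the comments is approximate):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	m := wrapperspb.String("hi")
	fmt.Printf("%q\n", proto.CompactTextString(m)) // roughly `value:"hi" `
	fmt.Printf("%q\n", proto.MarshalTextString(m)) // roughly `value: "hi"` followed by a newline
}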
var (
|
||||
newline = []byte("\n")
|
||||
endBraceNewline = []byte("}\n")
|
||||
posInf = []byte("inf")
|
||||
negInf = []byte("-inf")
|
||||
nan = []byte("nan")
|
||||
)
|
||||
|
||||
// textWriter is an io.Writer that tracks its indentation level.
|
||||
type textWriter struct {
|
||||
compact bool // same as TextMarshaler.Compact
|
||||
expandAny bool // same as TextMarshaler.ExpandAny
|
||||
complete bool // whether the current position is a complete line
|
||||
indent int // indentation level; never negative
|
||||
buf []byte
|
||||
}
|
||||
|
||||
func (w *textWriter) Write(p []byte) (n int, _ error) {
|
||||
newlines := bytes.Count(p, newline)
|
||||
if newlines == 0 {
|
||||
if !w.compact && w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
w.buf = append(w.buf, p...)
|
||||
w.complete = false
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
frags := bytes.SplitN(p, newline, newlines+1)
|
||||
if w.compact {
|
||||
for i, frag := range frags {
|
||||
if i > 0 {
|
||||
w.buf = append(w.buf, ' ')
|
||||
n++
|
||||
}
|
||||
w.buf = append(w.buf, frag...)
|
||||
n += len(frag)
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
for i, frag := range frags {
|
||||
if w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
w.buf = append(w.buf, frag...)
|
||||
n += len(frag)
|
||||
if i+1 < len(frags) {
|
||||
w.buf = append(w.buf, '\n')
|
||||
n++
|
||||
}
|
||||
}
|
||||
w.complete = len(frags[len(frags)-1]) == 0
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (w *textWriter) WriteByte(c byte) error {
|
||||
if w.compact && c == '\n' {
|
||||
c = ' '
|
||||
}
|
||||
if !w.compact && w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
w.buf = append(w.buf, c)
|
||||
w.complete = c == '\n'
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) {
|
||||
if !w.compact && w.complete {
|
||||
w.writeIndent()
|
||||
}
|
||||
w.complete = false
|
||||
|
||||
if fd.Kind() != protoreflect.GroupKind {
|
||||
w.buf = append(w.buf, fd.Name()...)
|
||||
w.WriteByte(':')
|
||||
} else {
|
||||
// Use message type name for group field name.
|
||||
w.buf = append(w.buf, fd.Message().Name()...)
|
||||
}
|
||||
|
||||
if !w.compact {
|
||||
w.WriteByte(' ')
|
||||
}
|
||||
}
|
||||
|
||||
func requiresQuotes(u string) bool {
|
||||
// When the type URL contains any characters other than [0-9A-Za-z._/], it must be quoted.
|
||||
for _, ch := range u {
|
||||
switch {
|
||||
case ch == '.' || ch == '/' || ch == '_':
|
||||
continue
|
||||
case '0' <= ch && ch <= '9':
|
||||
continue
|
||||
case 'A' <= ch && ch <= 'Z':
|
||||
continue
|
||||
case 'a' <= ch && ch <= 'z':
|
||||
continue
|
||||
default:
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// writeProto3Any writes an expanded google.protobuf.Any message.
|
||||
//
|
||||
// It returns (false, nil) if the value can't be unmarshaled (e.g. because
|
||||
// required messages are not linked in).
|
||||
//
|
||||
// It returns (true, error) when the message was written in expanded format or an error
|
||||
// was encountered.
|
||||
func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) {
|
||||
md := m.Descriptor()
|
||||
fdURL := md.Fields().ByName("type_url")
|
||||
fdVal := md.Fields().ByName("value")
|
||||
|
||||
url := m.Get(fdURL).String()
|
||||
mt, err := protoregistry.GlobalTypes.FindMessageByURL(url)
|
||||
if err != nil {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
b := m.Get(fdVal).Bytes()
|
||||
m2 := mt.New()
|
||||
if err := proto.Unmarshal(b, m2.Interface()); err != nil {
|
||||
return false, nil
|
||||
}
|
||||
w.Write([]byte("["))
|
||||
if requiresQuotes(url) {
|
||||
w.writeQuotedString(url)
|
||||
} else {
|
||||
w.Write([]byte(url))
|
||||
}
|
||||
if w.compact {
|
||||
w.Write([]byte("]:<"))
|
||||
} else {
|
||||
w.Write([]byte("]: <\n"))
|
||||
w.indent++
|
||||
}
|
||||
if err := w.writeMessage(m2); err != nil {
|
||||
return true, err
|
||||
}
|
||||
if w.compact {
|
||||
w.Write([]byte("> "))
|
||||
} else {
|
||||
w.indent--
|
||||
w.Write([]byte(">\n"))
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (w *textWriter) writeMessage(m protoreflect.Message) error {
|
||||
md := m.Descriptor()
|
||||
if w.expandAny && md.FullName() == "google.protobuf.Any" {
|
||||
if canExpand, err := w.writeProto3Any(m); canExpand {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
fds := md.Fields()
|
||||
for i := 0; i < fds.Len(); {
|
||||
fd := fds.Get(i)
|
||||
if od := fd.ContainingOneof(); od != nil {
|
||||
fd = m.WhichOneof(od)
|
||||
i += od.Fields().Len()
|
||||
} else {
|
||||
i++
|
||||
}
|
||||
if fd == nil || !m.Has(fd) {
|
||||
continue
|
||||
}
|
||||
|
||||
switch {
|
||||
case fd.IsList():
|
||||
lv := m.Get(fd).List()
|
||||
for j := 0; j < lv.Len(); j++ {
|
||||
w.writeName(fd)
|
||||
v := lv.Get(j)
|
||||
if err := w.writeSingularValue(v, fd); err != nil {
|
||||
return err
|
||||
}
|
||||
w.WriteByte('\n')
|
||||
}
|
||||
case fd.IsMap():
|
||||
kfd := fd.MapKey()
|
||||
vfd := fd.MapValue()
|
||||
mv := m.Get(fd).Map()
|
||||
|
||||
type entry struct{ key, val protoreflect.Value }
|
||||
var entries []entry
|
||||
mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
|
||||
entries = append(entries, entry{k.Value(), v})
|
||||
return true
|
||||
})
|
||||
sort.Slice(entries, func(i, j int) bool {
|
||||
switch kfd.Kind() {
|
||||
case protoreflect.BoolKind:
|
||||
return !entries[i].key.Bool() && entries[j].key.Bool()
|
||||
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
|
||||
return entries[i].key.Int() < entries[j].key.Int()
|
||||
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
|
||||
return entries[i].key.Uint() < entries[j].key.Uint()
|
||||
case protoreflect.StringKind:
|
||||
return entries[i].key.String() < entries[j].key.String()
|
||||
default:
|
||||
panic("invalid kind")
|
||||
}
|
||||
})
|
||||
for _, entry := range entries {
|
||||
w.writeName(fd)
|
||||
w.WriteByte('<')
|
||||
if !w.compact {
|
||||
w.WriteByte('\n')
|
||||
}
|
||||
w.indent++
|
||||
w.writeName(kfd)
|
||||
if err := w.writeSingularValue(entry.key, kfd); err != nil {
|
||||
return err
|
||||
}
|
||||
w.WriteByte('\n')
|
||||
w.writeName(vfd)
|
||||
if err := w.writeSingularValue(entry.val, vfd); err != nil {
|
||||
return err
|
||||
}
|
||||
w.WriteByte('\n')
|
||||
w.indent--
|
||||
w.WriteByte('>')
|
||||
w.WriteByte('\n')
|
||||
}
|
||||
default:
|
||||
w.writeName(fd)
|
||||
if err := w.writeSingularValue(m.Get(fd), fd); err != nil {
|
||||
return err
|
||||
}
|
||||
w.WriteByte('\n')
|
||||
}
|
||||
}
|
||||
|
||||
if b := m.GetUnknown(); len(b) > 0 {
|
||||
w.writeUnknownFields(b)
|
||||
}
|
||||
return w.writeExtensions(m)
|
||||
}
|
||||
|
||||
func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
|
||||
switch fd.Kind() {
|
||||
case protoreflect.FloatKind, protoreflect.DoubleKind:
|
||||
switch vf := v.Float(); {
|
||||
case math.IsInf(vf, +1):
|
||||
w.Write(posInf)
|
||||
case math.IsInf(vf, -1):
|
||||
w.Write(negInf)
|
||||
case math.IsNaN(vf):
|
||||
w.Write(nan)
|
||||
default:
|
||||
fmt.Fprint(w, v.Interface())
|
||||
}
|
||||
case protoreflect.StringKind:
|
||||
// NOTE: This does not validate UTF-8 for historical reasons.
|
||||
w.writeQuotedString(string(v.String()))
|
||||
case protoreflect.BytesKind:
|
||||
w.writeQuotedString(string(v.Bytes()))
|
||||
case protoreflect.MessageKind, protoreflect.GroupKind:
|
||||
var bra, ket byte = '<', '>'
|
||||
if fd.Kind() == protoreflect.GroupKind {
|
||||
bra, ket = '{', '}'
|
||||
}
|
||||
w.WriteByte(bra)
|
||||
if !w.compact {
|
||||
w.WriteByte('\n')
|
||||
}
|
||||
w.indent++
|
||||
m := v.Message()
|
||||
if m2, ok := m.Interface().(encoding.TextMarshaler); ok {
|
||||
b, err := m2.MarshalText()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.Write(b)
|
||||
} else {
|
||||
w.writeMessage(m)
|
||||
}
|
||||
w.indent--
|
||||
w.WriteByte(ket)
|
||||
case protoreflect.EnumKind:
|
||||
if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil {
|
||||
fmt.Fprint(w, ev.Name())
|
||||
} else {
|
||||
fmt.Fprint(w, v.Enum())
|
||||
}
|
||||
default:
|
||||
fmt.Fprint(w, v.Interface())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeQuotedString writes a quoted string in the protocol buffer text format.
|
||||
func (w *textWriter) writeQuotedString(s string) {
|
||||
w.WriteByte('"')
|
||||
for i := 0; i < len(s); i++ {
|
||||
switch c := s[i]; c {
|
||||
case '\n':
|
||||
w.buf = append(w.buf, `\n`...)
|
||||
case '\r':
|
||||
w.buf = append(w.buf, `\r`...)
|
||||
case '\t':
|
||||
w.buf = append(w.buf, `\t`...)
|
||||
case '"':
|
||||
w.buf = append(w.buf, `\"`...)
|
||||
case '\\':
|
||||
w.buf = append(w.buf, `\\`...)
|
||||
default:
|
||||
if isPrint := c >= 0x20 && c < 0x7f; isPrint {
|
||||
w.buf = append(w.buf, c)
|
||||
} else {
|
||||
w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...)
|
||||
}
|
||||
}
|
||||
}
|
||||
w.WriteByte('"')
|
||||
}
|
||||
|
||||
func (w *textWriter) writeUnknownFields(b []byte) {
|
||||
if !w.compact {
|
||||
fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b))
|
||||
}
|
||||
|
||||
for len(b) > 0 {
|
||||
num, wtyp, n := protowire.ConsumeTag(b)
|
||||
if n < 0 {
|
||||
return
|
||||
}
|
||||
b = b[n:]
|
||||
|
||||
if wtyp == protowire.EndGroupType {
|
||||
w.indent--
|
||||
w.Write(endBraceNewline)
|
||||
continue
|
||||
}
|
||||
fmt.Fprint(w, num)
|
||||
if wtyp != protowire.StartGroupType {
|
||||
w.WriteByte(':')
|
||||
}
|
||||
if !w.compact || wtyp == protowire.StartGroupType {
|
||||
w.WriteByte(' ')
|
||||
}
|
||||
switch wtyp {
|
||||
case protowire.VarintType:
|
||||
v, n := protowire.ConsumeVarint(b)
|
||||
if n < 0 {
|
||||
return
|
||||
}
|
||||
b = b[n:]
|
||||
fmt.Fprint(w, v)
|
||||
case protowire.Fixed32Type:
|
||||
v, n := protowire.ConsumeFixed32(b)
|
||||
if n < 0 {
|
||||
return
|
||||
}
|
||||
b = b[n:]
|
||||
fmt.Fprint(w, v)
|
||||
case protowire.Fixed64Type:
|
||||
v, n := protowire.ConsumeFixed64(b)
|
||||
if n < 0 {
|
||||
return
|
||||
}
|
||||
b = b[n:]
|
||||
fmt.Fprint(w, v)
|
||||
case protowire.BytesType:
|
||||
v, n := protowire.ConsumeBytes(b)
|
||||
if n < 0 {
|
||||
return
|
||||
}
|
||||
b = b[n:]
|
||||
fmt.Fprintf(w, "%q", v)
|
||||
case protowire.StartGroupType:
|
||||
w.WriteByte('{')
|
||||
w.indent++
|
||||
default:
|
||||
fmt.Fprintf(w, "/* unknown wire type %d */", wtyp)
|
||||
}
|
||||
w.WriteByte('\n')
|
||||
}
|
||||
}
|
||||
|
||||
// writeExtensions writes all the extensions in m.
|
||||
func (w *textWriter) writeExtensions(m protoreflect.Message) error {
|
||||
md := m.Descriptor()
|
||||
if md.ExtensionRanges().Len() == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
type ext struct {
|
||||
desc protoreflect.FieldDescriptor
|
||||
val protoreflect.Value
|
||||
}
|
||||
var exts []ext
|
||||
m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
|
||||
if fd.IsExtension() {
|
||||
exts = append(exts, ext{fd, v})
|
||||
}
|
||||
return true
|
||||
})
|
||||
sort.Slice(exts, func(i, j int) bool {
|
||||
return exts[i].desc.Number() < exts[j].desc.Number()
|
||||
})
|
||||
|
||||
for _, ext := range exts {
|
||||
// For message set, use the name of the message as the extension name.
|
||||
name := string(ext.desc.FullName())
|
||||
if isMessageSet(ext.desc.ContainingMessage()) {
|
||||
name = strings.TrimSuffix(name, ".message_set_extension")
|
||||
}
|
||||
|
||||
if !ext.desc.IsList() {
|
||||
if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
lv := ext.val.List()
|
||||
for i := 0; i < lv.Len(); i++ {
|
||||
if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
|
||||
fmt.Fprintf(w, "[%s]:", name)
|
||||
if !w.compact {
|
||||
w.WriteByte(' ')
|
||||
}
|
||||
if err := w.writeSingularValue(v, fd); err != nil {
|
||||
return err
|
||||
}
|
||||
w.WriteByte('\n')
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *textWriter) writeIndent() {
|
||||
if !w.complete {
|
||||
return
|
||||
}
|
||||
for i := 0; i < w.indent*2; i++ {
|
||||
w.buf = append(w.buf, ' ')
|
||||
}
|
||||
w.complete = false
|
||||
}
|
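The writeQuotedString method above keeps printable ASCII bytes verbatim and escapes everything else. A minimal standalone sketch of that rule (not part of the vendored package) makes the three-digit octal escaping easy to try out:

package main

import (
	"fmt"
	"strings"
)

// quote mirrors writeQuotedString: common escapes, printable ASCII verbatim,
// and all other bytes as three-digit octal escapes.
func quote(s string) string {
	var b strings.Builder
	b.WriteByte('"')
	for i := 0; i < len(s); i++ {
		switch c := s[i]; c {
		case '\n':
			b.WriteString(`\n`)
		case '\r':
			b.WriteString(`\r`)
		case '\t':
			b.WriteString(`\t`)
		case '"':
			b.WriteString(`\"`)
		case '\\':
			b.WriteString(`\\`)
		default:
			if c >= 0x20 && c < 0x7f {
				b.WriteByte(c)
			} else {
				fmt.Fprintf(&b, `\%03o`, c)
			}
		}
	}
	b.WriteByte('"')
	return b.String()
}

func main() {
	fmt.Println(quote("héllo\n")) // "h\303\251llo\n"
}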
78
vendor/github.com/golang/protobuf/proto/wire.go
generated
vendored
Normal file
@ -0,0 +1,78 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
protoV2 "google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/runtime/protoiface"
|
||||
)
|
||||
|
||||
// Size returns the size in bytes of the wire-format encoding of m.
|
||||
func Size(m Message) int {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
mi := MessageV2(m)
|
||||
return protoV2.Size(mi)
|
||||
}
|
||||
|
||||
// Marshal returns the wire-format encoding of m.
|
||||
func Marshal(m Message) ([]byte, error) {
|
||||
b, err := marshalAppend(nil, m, false)
|
||||
if b == nil {
|
||||
b = zeroBytes
|
||||
}
|
||||
return b, err
|
||||
}
|
||||
|
||||
var zeroBytes = make([]byte, 0, 0)
|
||||
|
||||
func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) {
|
||||
if m == nil {
|
||||
return nil, ErrNil
|
||||
}
|
||||
mi := MessageV2(m)
|
||||
nbuf, err := protoV2.MarshalOptions{
|
||||
Deterministic: deterministic,
|
||||
AllowPartial: true,
|
||||
}.MarshalAppend(buf, mi)
|
||||
if err != nil {
|
||||
return buf, err
|
||||
}
|
||||
if len(buf) == len(nbuf) {
|
||||
if !mi.ProtoReflect().IsValid() {
|
||||
return buf, ErrNil
|
||||
}
|
||||
}
|
||||
return nbuf, checkRequiredNotSet(mi)
|
||||
}
|
||||
|
||||
// Unmarshal parses a wire-format message in b and places the decoded results in m.
|
||||
//
|
||||
// Unmarshal resets m before starting to unmarshal, so any existing data in m is always
|
||||
// removed. Use UnmarshalMerge to preserve and append to existing data.
|
||||
func Unmarshal(b []byte, m Message) error {
|
||||
m.Reset()
|
||||
return UnmarshalMerge(b, m)
|
||||
}
|
||||
|
||||
// UnmarshalMerge parses a wire-format message in b and places the decoded results in m.
|
||||
func UnmarshalMerge(b []byte, m Message) error {
|
||||
mi := MessageV2(m)
|
||||
out, err := protoV2.UnmarshalOptions{
|
||||
AllowPartial: true,
|
||||
Merge: true,
|
||||
}.UnmarshalState(protoiface.UnmarshalInput{
|
||||
Buf: b,
|
||||
Message: mi.ProtoReflect(),
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if out.Flags&protoiface.UnmarshalInitialized > 0 {
|
||||
return nil
|
||||
}
|
||||
return checkRequiredNotSet(mi)
|
||||
}
|
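A minimal round-trip sketch of the Marshal/Unmarshal shims above, using the well-known StringValue wrapper as a stand-in for your own generated message type:

package main

import (
	"log"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/wrappers"
)

func main() {
	in := &wrappers.StringValue{Value: "hello"}

	b, err := proto.Marshal(in) // delegates to the protobuf v2 marshaller
	if err != nil {
		log.Fatal(err)
	}

	out := &wrappers.StringValue{}
	if err := proto.Unmarshal(b, out); err != nil { // Reset followed by UnmarshalMerge
		log.Fatal(err)
	}
	log.Printf("decoded: %q", out.Value)
}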
34
vendor/github.com/golang/protobuf/proto/wrappers.go
generated
vendored
Normal file
@ -0,0 +1,34 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package proto
|
||||
|
||||
// Bool stores v in a new bool value and returns a pointer to it.
|
||||
func Bool(v bool) *bool { return &v }
|
||||
|
||||
// Int stores v in a new int32 value and returns a pointer to it.
|
||||
//
|
||||
// Deprecated: Use Int32 instead.
|
||||
func Int(v int) *int32 { return Int32(int32(v)) }
|
||||
|
||||
// Int32 stores v in a new int32 value and returns a pointer to it.
|
||||
func Int32(v int32) *int32 { return &v }
|
||||
|
||||
// Int64 stores v in a new int64 value and returns a pointer to it.
|
||||
func Int64(v int64) *int64 { return &v }
|
||||
|
||||
// Uint32 stores v in a new uint32 value and returns a pointer to it.
|
||||
func Uint32(v uint32) *uint32 { return &v }
|
||||
|
||||
// Uint64 stores v in a new uint64 value and returns a pointer to it.
|
||||
func Uint64(v uint64) *uint64 { return &v }
|
||||
|
||||
// Float32 stores v in a new float32 value and returns a pointer to it.
|
||||
func Float32(v float32) *float32 { return &v }
|
||||
|
||||
// Float64 stores v in a new float64 value and returns a pointer to it.
|
||||
func Float64(v float64) *float64 { return &v }
|
||||
|
||||
// String stores v in a new string value and returns a pointer to it.
|
||||
func String(v string) *string { return &v }
|
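The pointer helpers above exist because optional proto2 fields are pointer-typed in generated Go structs; a tiny sketch of what they return:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// Each helper returns the address of a copy of its argument, ready to be
	// assigned to an optional (pointer-typed) field of a generated message.
	name := proto.String("router-1") // *string
	port := proto.Int32(8080)        // *int32
	on := proto.Bool(true)           // *bool
	fmt.Println(*name, *port, *on)
}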
53
vendor/github.com/google/gopacket/AUTHORS
generated
vendored
Normal file
@ -0,0 +1,53 @@
|
||||
AUTHORS AND MAINTAINERS:
|
||||
|
||||
MAIN DEVELOPERS:
|
||||
Graeme Connell <gconnell@google.com, gsconnell@gmail.com>
|
||||
|
||||
AUTHORS:
|
||||
Nigel Tao <nigeltao@google.com>
|
||||
Cole Mickens <cole.mickens@gmail.com>
|
||||
Ben Daglish <bdaglish@restorepoint.com>
|
||||
Luis Martinez <martinezlc99@gmail.com>
|
||||
Remco Verhoef <remco@dutchcoders.io>
|
||||
Hiroaki Kawai <Hiroaki.Kawai@gmail.com>
|
||||
Lukas Lueg <lukas.lueg@gmail.com>
|
||||
Laurent Hausermann <laurent.hausermann@gmail.com>
|
||||
Bill Green <bgreen@newrelic.com>
|
||||
Christian Mäder <christian.maeder@nine.ch>
|
||||
Gernot Vormayr <gvormayr@gmail.com>
|
||||
Vitor Garcia Graveto <victor.graveto@gmail.com>
|
||||
Elias Chavarria Reyes <elchavar@cisco.com>
|
||||
Daniel Rittweiler <ripx80@protonmail.com>
|
||||
|
||||
CONTRIBUTORS:
|
||||
Attila Oláh <attila@attilaolah.eu>
|
||||
Vittus Mikiassen <matt.miki.vimik@gmail.com>
|
||||
Matthias Radestock <matthias.radestock@gmail.com>
|
||||
Matthew Sackman <matthew@wellquite.org>
|
||||
Loic Prylli <loicp@google.com>
|
||||
Alexandre Fiori <fiorix@gmail.com>
|
||||
Adrian Tam <adrian.c.m.tam@gmail.com>
|
||||
Satoshi Matsumoto <kaorimatz@gmail.com>
|
||||
David Stainton <dstainton415@gmail.com>
|
||||
Jesse Ward <jesse@jesseward.com>
|
||||
Kane Mathers <kane@kanemathers.name>
|
||||
Jose Selvi <jselvi@pentester.es>
|
||||
Yerden Zhumabekov <yerden.zhumabekov@gmail.com>
|
||||
|
||||
-----------------------------------------------
|
||||
FORKED FROM github.com/akrennmair/gopcap
|
||||
ALL THE FOLLOWING ARE FOR THAT PROJECT
|
||||
|
||||
MAIN DEVELOPERS:
|
||||
Andreas Krennmair <ak@synflood.at>
|
||||
|
||||
CONTRIBUTORS:
|
||||
Andrea Nall <anall@andreanall.com>
|
||||
Daniel Arndt <danielarndt@gmail.com>
|
||||
Dustin Sallings <dustin@spy.net>
|
||||
Graeme Connell <gconnell@google.com, gsconnell@gmail.com>
|
||||
Guillaume Savary <guillaume@savary.name>
|
||||
Mark Smith <mark@qq.is>
|
||||
Miek Gieben <miek@miek.nl>
|
||||
Mike Bell <mike@mikebell.org>
|
||||
Trevor Strohman <strohman@google.com>
|
28
vendor/github.com/google/gopacket/LICENSE
generated
vendored
Normal file
@ -0,0 +1,28 @@
|
||||
Copyright (c) 2012 Google, Inc. All rights reserved.
|
||||
Copyright (c) 2009-2011 Andreas Krennmair. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Andreas Krennmair, Google, nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
36
vendor/github.com/google/gopacket/routing/common.go
generated
vendored
Normal file
@ -0,0 +1,36 @@
|
||||
// Copyright 2012 Google, Inc. All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license
|
||||
// that can be found in the LICENSE file in the root of the source
|
||||
// tree.
|
||||
|
||||
package routing
|
||||
|
||||
import (
|
||||
"net"
|
||||
)
|
||||
|
||||
// Router implements simple IPv4/IPv6 routing based on the kernel's routing
|
||||
// table. This routing library has very few features and may actually route
|
||||
// incorrectly in some cases, but it should work the majority of the time.
|
||||
type Router interface {
|
||||
// Route returns where to route a packet based on the packet's source
|
||||
// and destination IP address.
|
||||
//
|
||||
// Callers may pass in nil for src, in which case the src is treated as
|
||||
// either 0.0.0.0 or ::, depending on whether dst is a v4 or v6 address.
|
||||
//
|
||||
// It returns the interface on which to send the packet, the gateway IP
|
||||
// to send the packet to (if necessary), the preferred src IP to use (if
|
||||
// available). If the preferred src address is not given in the routing
|
||||
// table, the first IP address of the interface is provided.
|
||||
//
|
||||
// If an error is encountered, iface, gateway, and
|
||||
// preferredSrc will be nil, and err will be set.
|
||||
Route(dst net.IP) (iface *net.Interface, gateway, preferredSrc net.IP, err error)
|
||||
|
||||
// RouteWithSrc routes based on source information as well as destination
|
||||
// information. Either or both of input/src can be nil. If both are, this
|
||||
// should behave exactly like Route(dst)
|
||||
RouteWithSrc(input net.HardwareAddr, src, dst net.IP) (iface *net.Interface, gateway, preferredSrc net.IP, err error)
|
||||
}
|
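A short usage sketch for the Router interface above, assuming the Linux implementation added further down in this commit; the destination address is an arbitrary example.

// +build linux

package main

import (
	"fmt"
	"log"
	"net"

	"github.com/google/gopacket/routing"
)

func main() {
	r, err := routing.New() // snapshots the kernel routing table via netlink
	if err != nil {
		log.Fatal(err)
	}
	iface, gateway, preferredSrc, err := r.Route(net.ParseIP("8.8.8.8"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("iface=%s gateway=%v src=%v\n", iface.Name, gateway, preferredSrc)
}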
15
vendor/github.com/google/gopacket/routing/other.go
generated
vendored
Normal file
@ -0,0 +1,15 @@
|
||||
// Copyright 2012 Google, Inc. All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license
|
||||
// that can be found in the LICENSE file in the root of the source
|
||||
// tree.
|
||||
|
||||
// +build !linux
|
||||
|
||||
// Package routing is currently only supported in Linux, but the build system requires a valid go file for all architectures.
|
||||
|
||||
package routing
|
||||
|
||||
func New() (Router, error) {
|
||||
panic("router only implemented in linux")
|
||||
}
|
241
vendor/github.com/google/gopacket/routing/routing.go
generated
vendored
Normal file
@ -0,0 +1,241 @@
|
||||
// Copyright 2012 Google, Inc. All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license
|
||||
// that can be found in the LICENSE file in the root of the source
|
||||
// tree.
|
||||
|
||||
// +build linux
|
||||
|
||||
// Package routing provides a very basic but mostly functional implementation of
|
||||
// a routing table for IPv4/IPv6 addresses. It uses a routing table pulled from
|
||||
// the kernel via netlink to find the correct interface, gateway, and preferred
|
||||
// source IP address for packets destined to a particular location.
|
||||
//
|
||||
// The routing package is meant to be used with applications that are sending
|
||||
// raw packet data, which don't have the benefit of having the kernel route
|
||||
// packets for them.
|
||||
package routing
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"sort"
|
||||
"strings"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Pulled from http://man7.org/linux/man-pages/man7/rtnetlink.7.html
|
||||
// See the section on RTM_NEWROUTE, specifically 'struct rtmsg'.
|
||||
type routeInfoInMemory struct {
|
||||
Family byte
|
||||
DstLen byte
|
||||
SrcLen byte
|
||||
TOS byte
|
||||
|
||||
Table byte
|
||||
Protocol byte
|
||||
Scope byte
|
||||
Type byte
|
||||
|
||||
Flags uint32
|
||||
}
|
||||
|
||||
// rtInfo contains information on a single route.
|
||||
type rtInfo struct {
|
||||
Src, Dst *net.IPNet
|
||||
Gateway, PrefSrc net.IP
|
||||
// We currently ignore the InputIface.
|
||||
InputIface, OutputIface uint32
|
||||
Priority uint32
|
||||
}
|
||||
|
||||
// routeSlice implements sort.Interface to sort routes by Priority.
|
||||
type routeSlice []*rtInfo
|
||||
|
||||
func (r routeSlice) Len() int {
|
||||
return len(r)
|
||||
}
|
||||
func (r routeSlice) Less(i, j int) bool {
|
||||
return r[i].Priority < r[j].Priority
|
||||
}
|
||||
func (r routeSlice) Swap(i, j int) {
|
||||
r[i], r[j] = r[j], r[i]
|
||||
}
|
||||
|
||||
type router struct {
|
||||
ifaces []net.Interface
|
||||
addrs []ipAddrs
|
||||
v4, v6 routeSlice
|
||||
}
|
||||
|
||||
func (r *router) String() string {
|
||||
strs := []string{"ROUTER", "--- V4 ---"}
|
||||
for _, route := range r.v4 {
|
||||
strs = append(strs, fmt.Sprintf("%+v", *route))
|
||||
}
|
||||
strs = append(strs, "--- V6 ---")
|
||||
for _, route := range r.v6 {
|
||||
strs = append(strs, fmt.Sprintf("%+v", *route))
|
||||
}
|
||||
return strings.Join(strs, "\n")
|
||||
}
|
||||
|
||||
type ipAddrs struct {
|
||||
v4, v6 net.IP
|
||||
}
|
||||
|
||||
func (r *router) Route(dst net.IP) (iface *net.Interface, gateway, preferredSrc net.IP, err error) {
|
||||
return r.RouteWithSrc(nil, nil, dst)
|
||||
}
|
||||
|
||||
func (r *router) RouteWithSrc(input net.HardwareAddr, src, dst net.IP) (iface *net.Interface, gateway, preferredSrc net.IP, err error) {
|
||||
var ifaceIndex int
|
||||
switch {
|
||||
case dst.To4() != nil:
|
||||
ifaceIndex, gateway, preferredSrc, err = r.route(r.v4, input, src, dst)
|
||||
case dst.To16() != nil:
|
||||
ifaceIndex, gateway, preferredSrc, err = r.route(r.v6, input, src, dst)
|
||||
default:
|
||||
err = errors.New("IP is not valid as IPv4 or IPv6")
|
||||
return
|
||||
}
|
||||
|
||||
// Interfaces are 1-indexed, but we store them in a 0-indexed array.
|
||||
ifaceIndex--
|
||||
|
||||
iface = &r.ifaces[ifaceIndex]
|
||||
if preferredSrc == nil {
|
||||
switch {
|
||||
case dst.To4() != nil:
|
||||
preferredSrc = r.addrs[ifaceIndex].v4
|
||||
case dst.To16() != nil:
|
||||
preferredSrc = r.addrs[ifaceIndex].v6
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (r *router) route(routes routeSlice, input net.HardwareAddr, src, dst net.IP) (iface int, gateway, preferredSrc net.IP, err error) {
|
||||
var inputIndex uint32
|
||||
if input != nil {
|
||||
for i, iface := range r.ifaces {
|
||||
if bytes.Equal(input, iface.HardwareAddr) {
|
||||
// Convert from zero- to one-indexed.
|
||||
inputIndex = uint32(i + 1)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, rt := range routes {
|
||||
if rt.InputIface != 0 && rt.InputIface != inputIndex {
|
||||
continue
|
||||
}
|
||||
if rt.Src != nil && !rt.Src.Contains(src) {
|
||||
continue
|
||||
}
|
||||
if rt.Dst != nil && !rt.Dst.Contains(dst) {
|
||||
continue
|
||||
}
|
||||
return int(rt.OutputIface), rt.Gateway, rt.PrefSrc, nil
|
||||
}
|
||||
err = fmt.Errorf("no route found for %v", dst)
|
||||
return
|
||||
}
|
||||
|
||||
// New creates a new router object. The router returned by New currently does
|
||||
// not update its routes after construction... care should be taken for
|
||||
// long-running programs to call New() regularly to take into account any
|
||||
// changes to the routing table which have occurred since the last New() call.
|
||||
func New() (Router, error) {
|
||||
rtr := &router{}
|
||||
tab, err := syscall.NetlinkRIB(syscall.RTM_GETROUTE, syscall.AF_UNSPEC)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
msgs, err := syscall.ParseNetlinkMessage(tab)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
loop:
|
||||
for _, m := range msgs {
|
||||
switch m.Header.Type {
|
||||
case syscall.NLMSG_DONE:
|
||||
break loop
|
||||
case syscall.RTM_NEWROUTE:
|
||||
rt := (*routeInfoInMemory)(unsafe.Pointer(&m.Data[0]))
|
||||
routeInfo := rtInfo{}
|
||||
attrs, err := syscall.ParseNetlinkRouteAttr(&m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch rt.Family {
|
||||
case syscall.AF_INET:
|
||||
rtr.v4 = append(rtr.v4, &routeInfo)
|
||||
case syscall.AF_INET6:
|
||||
rtr.v6 = append(rtr.v6, &routeInfo)
|
||||
default:
|
||||
continue loop
|
||||
}
|
||||
for _, attr := range attrs {
|
||||
switch attr.Attr.Type {
|
||||
case syscall.RTA_DST:
|
||||
routeInfo.Dst = &net.IPNet{
|
||||
IP: net.IP(attr.Value),
|
||||
Mask: net.CIDRMask(int(rt.DstLen), len(attr.Value)*8),
|
||||
}
|
||||
case syscall.RTA_SRC:
|
||||
routeInfo.Src = &net.IPNet{
|
||||
IP: net.IP(attr.Value),
|
||||
Mask: net.CIDRMask(int(rt.SrcLen), len(attr.Value)*8),
|
||||
}
|
||||
case syscall.RTA_GATEWAY:
|
||||
routeInfo.Gateway = net.IP(attr.Value)
|
||||
case syscall.RTA_PREFSRC:
|
||||
routeInfo.PrefSrc = net.IP(attr.Value)
|
||||
case syscall.RTA_IIF:
|
||||
routeInfo.InputIface = *(*uint32)(unsafe.Pointer(&attr.Value[0]))
|
||||
case syscall.RTA_OIF:
|
||||
routeInfo.OutputIface = *(*uint32)(unsafe.Pointer(&attr.Value[0]))
|
||||
case syscall.RTA_PRIORITY:
|
||||
routeInfo.Priority = *(*uint32)(unsafe.Pointer(&attr.Value[0]))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
sort.Sort(rtr.v4)
|
||||
sort.Sort(rtr.v6)
|
||||
ifaces, err := net.Interfaces()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for i, iface := range ifaces {
|
||||
if i != iface.Index-1 {
|
||||
return nil, fmt.Errorf("out of order iface %d = %v", i, iface)
|
||||
}
|
||||
rtr.ifaces = append(rtr.ifaces, iface)
|
||||
var addrs ipAddrs
|
||||
ifaceAddrs, err := iface.Addrs()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, addr := range ifaceAddrs {
|
||||
if inet, ok := addr.(*net.IPNet); ok {
|
||||
// Go has a nasty habit of giving you IPv4s as ::ffff:1.2.3.4 instead of 1.2.3.4.
|
||||
// We want to use mapped v4 addresses as v4 preferred addresses, never as v6
|
||||
// preferred addresses.
|
||||
if v4 := inet.IP.To4(); v4 != nil {
|
||||
if addrs.v4 == nil {
|
||||
addrs.v4 = v4
|
||||
}
|
||||
} else if addrs.v6 == nil {
|
||||
addrs.v6 = inet.IP
|
||||
}
|
||||
}
|
||||
}
|
||||
rtr.addrs = append(rtr.addrs, addrs)
|
||||
}
|
||||
return rtr, nil
|
||||
}
|
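Since a Router returned by New never refreshes itself (see the New comment above), a long-running program can rebuild the snapshot on a timer; a rough sketch under that assumption, with the helper name chosen here for illustration:

// +build linux

package rtutil

import (
	"time"

	"github.com/google/gopacket/routing"
)

// RefreshRouter rebuilds the routing snapshot periodically and sends each new
// Router to out, since a Router returned by routing.New never updates itself.
func RefreshRouter(every time.Duration, out chan<- routing.Router) {
	ticker := time.NewTicker(every)
	defer ticker.Stop()
	for range ticker.C {
		if r, err := routing.New(); err == nil {
			out <- r // consumers swap in the fresh snapshot
		}
	}
}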
9
vendor/github.com/google/uuid/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,9 @@
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.4.3
|
||||
- 1.5.3
|
||||
- tip
|
||||
|
||||
script:
|
||||
- go test -v ./...
|
10
vendor/github.com/google/uuid/CONTRIBUTING.md
generated
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
# How to contribute
|
||||
|
||||
We definitely welcome patches and contribution to this project!
|
||||
|
||||
### Legal requirements
|
||||
|
||||
In order to protect both you and ourselves, you will need to sign the
|
||||
[Contributor License Agreement](https://cla.developers.google.com/clas).
|
||||
|
||||
You may have already signed it for other Google projects.
|
9
vendor/github.com/google/uuid/CONTRIBUTORS
generated
vendored
Normal file
@ -0,0 +1,9 @@
|
||||
Paul Borman <borman@google.com>
|
||||
bmatsuo
|
||||
shawnps
|
||||
theory
|
||||
jboverfelt
|
||||
dsymonds
|
||||
cd1
|
||||
wallclockbuilder
|
||||
dansouza
|
27
vendor/github.com/google/uuid/LICENSE
generated
vendored
Normal file
@ -0,0 +1,27 @@
|
||||
Copyright (c) 2009,2014 Google Inc. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
19
vendor/github.com/google/uuid/README.md
generated
vendored
Normal file
@ -0,0 +1,19 @@
|
||||
# uuid 
|
||||
The uuid package generates and inspects UUIDs based on
|
||||
[RFC 4122](http://tools.ietf.org/html/rfc4122)
|
||||
and DCE 1.1: Authentication and Security Services.
|
||||
|
||||
This package is based on the github.com/pborman/uuid package (previously named
|
||||
code.google.com/p/go-uuid). It differs from these earlier packages in that
|
||||
a UUID is a 16 byte array rather than a byte slice. One loss due to this
|
||||
change is the ability to represent an invalid UUID (vs a NIL UUID).
|
||||
|
||||
###### Install
|
||||
`go get github.com/google/uuid`
|
||||
|
||||
###### Documentation
|
||||
[godoc](http://godoc.org/github.com/google/uuid)
|
||||
|
||||
Full `go doc` style documentation for the package can be viewed online without
|
||||
installing this package by using the GoDoc site here:
|
||||
http://pkg.go.dev/github.com/google/uuid
|
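For reference, the most common entry points described in the README, as a minimal sketch:

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	id := uuid.New() // random (Version 4) UUID; panics only if crypto/rand fails
	fmt.Println(id)  // canonical xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx form

	s := uuid.NewString() // same as uuid.New().String()
	fmt.Println(s)
}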
80
vendor/github.com/google/uuid/dce.go
generated
vendored
Normal file
@ -0,0 +1,80 @@
|
||||
// Copyright 2016 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"os"
|
||||
)
|
||||
|
||||
// A Domain represents a Version 2 domain
|
||||
type Domain byte
|
||||
|
||||
// Domain constants for DCE Security (Version 2) UUIDs.
|
||||
const (
|
||||
Person = Domain(0)
|
||||
Group = Domain(1)
|
||||
Org = Domain(2)
|
||||
)
|
||||
|
||||
// NewDCESecurity returns a DCE Security (Version 2) UUID.
|
||||
//
|
||||
// The domain should be one of Person, Group or Org.
|
||||
// On a POSIX system the id should be the user's UID for the Person
|
||||
// domain and the user's GID for the Group. The meaning of id for
|
||||
// the domain Org or on non-POSIX systems is site defined.
|
||||
//
|
||||
// For a given domain/id pair the same token may be returned for up to
|
||||
// 7 minutes and 10 seconds.
|
||||
func NewDCESecurity(domain Domain, id uint32) (UUID, error) {
|
||||
uuid, err := NewUUID()
|
||||
if err == nil {
|
||||
uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
|
||||
uuid[9] = byte(domain)
|
||||
binary.BigEndian.PutUint32(uuid[0:], id)
|
||||
}
|
||||
return uuid, err
|
||||
}
|
||||
|
||||
// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
|
||||
// domain with the id returned by os.Getuid.
|
||||
//
|
||||
// NewDCESecurity(Person, uint32(os.Getuid()))
|
||||
func NewDCEPerson() (UUID, error) {
|
||||
return NewDCESecurity(Person, uint32(os.Getuid()))
|
||||
}
|
||||
|
||||
// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
|
||||
// domain with the id returned by os.Getgid.
|
||||
//
|
||||
// NewDCESecurity(Group, uint32(os.Getgid()))
|
||||
func NewDCEGroup() (UUID, error) {
|
||||
return NewDCESecurity(Group, uint32(os.Getgid()))
|
||||
}
|
||||
|
||||
// Domain returns the domain for a Version 2 UUID. Domains are only defined
|
||||
// for Version 2 UUIDs.
|
||||
func (uuid UUID) Domain() Domain {
|
||||
return Domain(uuid[9])
|
||||
}
|
||||
|
||||
// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2
|
||||
// UUIDs.
|
||||
func (uuid UUID) ID() uint32 {
|
||||
return binary.BigEndian.Uint32(uuid[0:4])
|
||||
}
|
||||
|
||||
func (d Domain) String() string {
|
||||
switch d {
|
||||
case Person:
|
||||
return "Person"
|
||||
case Group:
|
||||
return "Group"
|
||||
case Org:
|
||||
return "Org"
|
||||
}
|
||||
return fmt.Sprintf("Domain%d", int(d))
|
||||
}
|
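A small sketch of the DCE Security helpers above; on a POSIX system the embedded id is the calling user's UID.

package main

import (
	"fmt"
	"log"

	"github.com/google/uuid"
)

func main() {
	id, err := uuid.NewDCEPerson() // Version 2 UUID in the Person domain
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(id)
	fmt.Println(id.Domain(), id.ID()) // "Person" and the embedded uid
}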
12
vendor/github.com/google/uuid/doc.go
generated
vendored
Normal file
@ -0,0 +1,12 @@
|
||||
// Copyright 2016 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package uuid generates and inspects UUIDs.
|
||||
//
|
||||
// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security
|
||||
// Services.
|
||||
//
|
||||
// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to
|
||||
// maps or compared directly.
|
||||
package uuid
|
1
vendor/github.com/google/uuid/go.mod
generated
vendored
Normal file
@ -0,0 +1 @@
|
||||
module github.com/google/uuid
|
53
vendor/github.com/google/uuid/hash.go
generated
vendored
Normal file
@ -0,0 +1,53 @@
|
||||
// Copyright 2016 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"crypto/sha1"
|
||||
"hash"
|
||||
)
|
||||
|
||||
// Well known namespace IDs and UUIDs
|
||||
var (
|
||||
NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
|
||||
NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
|
||||
NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
|
||||
NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
|
||||
Nil UUID // empty UUID, all zeros
|
||||
)
|
||||
|
||||
// NewHash returns a new UUID derived from the hash of space concatenated with
|
||||
// data generated by h. The hash should be at least 16 bytes in length. The
|
||||
// first 16 bytes of the hash are used to form the UUID. The version of the
|
||||
// UUID will be the lower 4 bits of version. NewHash is used to implement
|
||||
// NewMD5 and NewSHA1.
|
||||
func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
|
||||
h.Reset()
|
||||
h.Write(space[:]) //nolint:errcheck
|
||||
h.Write(data) //nolint:errcheck
|
||||
s := h.Sum(nil)
|
||||
var uuid UUID
|
||||
copy(uuid[:], s)
|
||||
uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
|
||||
uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
|
||||
return uuid
|
||||
}
|
||||
|
||||
// NewMD5 returns a new MD5 (Version 3) UUID based on the
|
||||
// supplied name space and data. It is the same as calling:
|
||||
//
|
||||
// NewHash(md5.New(), space, data, 3)
|
||||
func NewMD5(space UUID, data []byte) UUID {
|
||||
return NewHash(md5.New(), space, data, 3)
|
||||
}
|
||||
|
||||
// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
|
||||
// supplied name space and data. It is the same as calling:
|
||||
//
|
||||
// NewHash(sha1.New(), space, data, 5)
|
||||
func NewSHA1(space UUID, data []byte) UUID {
|
||||
return NewHash(sha1.New(), space, data, 5)
|
||||
}
|
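Name-based UUIDs from NewMD5/NewSHA1 are deterministic, which makes them useful as stable identifiers; a quick sketch:

package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	a := uuid.NewSHA1(uuid.NameSpaceDNS, []byte("example.com"))
	b := uuid.NewSHA1(uuid.NameSpaceDNS, []byte("example.com"))
	fmt.Println(a)                   // Version 5 UUID
	fmt.Println(a == b, a.Version()) // true VERSION_5
}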
38
vendor/github.com/google/uuid/marshal.go
generated
vendored
Normal file
@ -0,0 +1,38 @@
|
||||
// Copyright 2016 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import "fmt"
|
||||
|
||||
// MarshalText implements encoding.TextMarshaler.
|
||||
func (uuid UUID) MarshalText() ([]byte, error) {
|
||||
var js [36]byte
|
||||
encodeHex(js[:], uuid)
|
||||
return js[:], nil
|
||||
}
|
||||
|
||||
// UnmarshalText implements encoding.TextUnmarshaler.
|
||||
func (uuid *UUID) UnmarshalText(data []byte) error {
|
||||
id, err := ParseBytes(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*uuid = id
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalBinary implements encoding.BinaryMarshaler.
|
||||
func (uuid UUID) MarshalBinary() ([]byte, error) {
|
||||
return uuid[:], nil
|
||||
}
|
||||
|
||||
// UnmarshalBinary implements encoding.BinaryUnmarshaler.
|
||||
func (uuid *UUID) UnmarshalBinary(data []byte) error {
|
||||
if len(data) != 16 {
|
||||
return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
|
||||
}
|
||||
copy(uuid[:], data)
|
||||
return nil
|
||||
}
|
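Because UUID implements TextMarshaler/TextUnmarshaler above, it round-trips through encoding/json with no extra code; a minimal sketch:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/google/uuid"
)

type record struct {
	ID uuid.UUID `json:"id"` // serialized via MarshalText / UnmarshalText
}

func main() {
	b, err := json.Marshal(record{ID: uuid.New()})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b)) // {"id":"..."}

	var r record
	if err := json.Unmarshal(b, &r); err != nil {
		log.Fatal(err)
	}
	fmt.Println(r.ID)
}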
90
vendor/github.com/google/uuid/node.go
generated
vendored
Normal file
@ -0,0 +1,90 @@
|
||||
// Copyright 2016 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
|
||||
nodeMu sync.Mutex
|
||||
ifname string // name of interface being used
|
||||
nodeID [6]byte // hardware for version 1 UUIDs
|
||||
zeroID [6]byte // nodeID with only 0's
|
||||
)
|
||||
|
||||
// NodeInterface returns the name of the interface from which the NodeID was
|
||||
// derived. The interface "user" is returned if the NodeID was set by
|
||||
// SetNodeID.
|
||||
func NodeInterface() string {
|
||||
defer nodeMu.Unlock()
|
||||
nodeMu.Lock()
|
||||
return ifname
|
||||
}
|
||||
|
||||
// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs.
|
||||
// If name is "" then the first usable interface found will be used or a random
|
||||
// Node ID will be generated. If a named interface cannot be found then false
|
||||
// is returned.
|
||||
//
|
||||
// SetNodeInterface never fails when name is "".
|
||||
func SetNodeInterface(name string) bool {
|
||||
defer nodeMu.Unlock()
|
||||
nodeMu.Lock()
|
||||
return setNodeInterface(name)
|
||||
}
|
||||
|
||||
func setNodeInterface(name string) bool {
|
||||
iname, addr := getHardwareInterface(name) // null implementation for js
|
||||
if iname != "" && addr != nil {
|
||||
ifname = iname
|
||||
copy(nodeID[:], addr)
|
||||
return true
|
||||
}
|
||||
|
||||
// We found no interfaces with a valid hardware address. If name
|
||||
// does not specify a specific interface generate a random Node ID
|
||||
// (section 4.1.6)
|
||||
if name == "" {
|
||||
ifname = "random"
|
||||
randomBits(nodeID[:])
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// NodeID returns a slice of a copy of the current Node ID, setting the Node ID
|
||||
// if not already set.
|
||||
func NodeID() []byte {
|
||||
defer nodeMu.Unlock()
|
||||
nodeMu.Lock()
|
||||
if nodeID == zeroID {
|
||||
setNodeInterface("")
|
||||
}
|
||||
nid := nodeID
|
||||
return nid[:]
|
||||
}
|
||||
|
||||
// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes
|
||||
// of id are used. If id is less than 6 bytes then false is returned and the
|
||||
// Node ID is not set.
|
||||
func SetNodeID(id []byte) bool {
|
||||
if len(id) < 6 {
|
||||
return false
|
||||
}
|
||||
defer nodeMu.Unlock()
|
||||
nodeMu.Lock()
|
||||
copy(nodeID[:], id)
|
||||
ifname = "user"
|
||||
return true
|
||||
}
|
||||
|
||||
// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is
|
||||
// not valid. The NodeID is only well defined for version 1 and 2 UUIDs.
|
||||
func (uuid UUID) NodeID() []byte {
|
||||
var node [6]byte
|
||||
copy(node[:], uuid[10:])
|
||||
return node[:]
|
||||
}
|
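One practical use of SetNodeID above is pinning the node portion of Version 1 UUIDs so they do not expose a real MAC address; a sketch with a made-up locally administered address:

package main

import (
	"fmt"
	"log"

	"github.com/google/uuid"
)

func main() {
	// Only the first 6 bytes are used; this address is illustrative, not a
	// real interface.
	uuid.SetNodeID([]byte{0x02, 0x00, 0x5e, 0x10, 0x00, 0x01})

	id, err := uuid.NewUUID() // Version 1 UUID using the pinned node ID
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(id, id.NodeID())
}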
12
vendor/github.com/google/uuid/node_js.go
generated
vendored
Normal file
@ -0,0 +1,12 @@
|
||||
// Copyright 2017 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build js
|
||||
|
||||
package uuid
|
||||
|
||||
// getHardwareInterface returns nil values for the JS version of the code.
|
||||
// This removes the "net" dependency, because it is not used in the browser.
|
||||
// Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
|
||||
func getHardwareInterface(name string) (string, []byte) { return "", nil }
|
33
vendor/github.com/google/uuid/node_net.go
generated
vendored
Normal file
@ -0,0 +1,33 @@
|
||||
// Copyright 2017 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !js
|
||||
|
||||
package uuid
|
||||
|
||||
import "net"
|
||||
|
||||
var interfaces []net.Interface // cached list of interfaces
|
||||
|
||||
// getHardwareInterface returns the name and hardware address of interface name.
|
||||
// If name is "" then the name and hardware address of one of the system's
|
||||
// interfaces is returned. If no interfaces are found (name does not exist or
|
||||
// there are no interfaces) then "", nil is returned.
|
||||
//
|
||||
// Only addresses of at least 6 bytes are returned.
|
||||
func getHardwareInterface(name string) (string, []byte) {
|
||||
if interfaces == nil {
|
||||
var err error
|
||||
interfaces, err = net.Interfaces()
|
||||
if err != nil {
|
||||
return "", nil
|
||||
}
|
||||
}
|
||||
for _, ifs := range interfaces {
|
||||
if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
|
||||
return ifs.Name, ifs.HardwareAddr
|
||||
}
|
||||
}
|
||||
return "", nil
|
||||
}
|
59
vendor/github.com/google/uuid/sql.go
generated
vendored
Normal file
@ -0,0 +1,59 @@
|
||||
// Copyright 2016 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Scan implements sql.Scanner so UUIDs can be read from databases transparently.
|
||||
// Currently, database types that map to string and []byte are supported. Please
|
||||
// consult database-specific driver documentation for matching types.
|
||||
func (uuid *UUID) Scan(src interface{}) error {
|
||||
switch src := src.(type) {
|
||||
case nil:
|
||||
return nil
|
||||
|
||||
case string:
|
||||
// if an empty UUID comes from a table, we return a null UUID
|
||||
if src == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
// see Parse for required string format
|
||||
u, err := Parse(src)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Scan: %v", err)
|
||||
}
|
||||
|
||||
*uuid = u
|
||||
|
||||
case []byte:
|
||||
// if an empty UUID comes from a table, we return a null UUID
|
||||
if len(src) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// assumes a simple slice of bytes if 16 bytes
|
||||
// otherwise attempts to parse
|
||||
if len(src) != 16 {
|
||||
return uuid.Scan(string(src))
|
||||
}
|
||||
copy((*uuid)[:], src)
|
||||
|
||||
default:
|
||||
return fmt.Errorf("Scan: unable to scan type %T into UUID", src)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Value implements sql.Valuer so that UUIDs can be written to databases
|
||||
// transparently. Currently, UUIDs map to strings. Please consult
|
||||
// database-specific driver documentation for matching types.
|
||||
func (uuid UUID) Value() (driver.Value, error) {
|
||||
return uuid.String(), nil
|
||||
}
|
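A sketch of reading and writing UUID columns through the Scan/Value implementations above; the driver, DSN, and table names are hypothetical placeholders.

package main

import (
	"database/sql"
	"fmt"
	"log"

	"github.com/google/uuid"
	_ "github.com/lib/pq" // any driver returning string or []byte columns works
)

func main() {
	db, err := sql.Open("postgres", "postgres://localhost/app?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var id uuid.UUID
	// *UUID implements sql.Scanner, so the column scans directly.
	if err := db.QueryRow(`SELECT id FROM users LIMIT 1`).Scan(&id); err != nil {
		log.Fatal(err)
	}
	fmt.Println(id)

	// UUID implements driver.Valuer, so it can be passed as a query argument.
	if _, err := db.Exec(`DELETE FROM sessions WHERE user_id = $1`, id); err != nil {
		log.Fatal(err)
	}
}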
123
vendor/github.com/google/uuid/time.go
generated
vendored
Normal file
@ -0,0 +1,123 @@
|
||||
// Copyright 2016 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// A Time represents a time as the number of 100's of nanoseconds since 15 Oct
|
||||
// 1582.
|
||||
type Time int64
|
||||
|
||||
const (
|
||||
lillian = 2299160 // Julian day of 15 Oct 1582
|
||||
unix = 2440587 // Julian day of 1 Jan 1970
|
||||
epoch = unix - lillian // Days between epochs
|
||||
g1582 = epoch * 86400 // seconds between epochs
|
||||
g1582ns100 = g1582 * 10000000 // 100s of nanoseconds between epochs
|
||||
)
|
||||
|
||||
var (
|
||||
timeMu sync.Mutex
|
||||
lasttime uint64 // last time we returned
|
||||
clockSeq uint16 // clock sequence for this run
|
||||
|
||||
timeNow = time.Now // for testing
|
||||
)
|
||||
|
||||
// UnixTime converts t to the number of seconds and nanoseconds using the Unix
|
||||
// epoch of 1 Jan 1970.
|
||||
func (t Time) UnixTime() (sec, nsec int64) {
|
||||
sec = int64(t - g1582ns100)
|
||||
nsec = (sec % 10000000) * 100
|
||||
sec /= 10000000
|
||||
return sec, nsec
|
||||
}
|
||||
|
||||
// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
|
||||
// clock sequence as well as adjusting the clock sequence as needed. An error
|
||||
// is returned if the current time cannot be determined.
|
||||
func GetTime() (Time, uint16, error) {
|
||||
defer timeMu.Unlock()
|
||||
timeMu.Lock()
|
||||
return getTime()
|
||||
}
|
||||
|
||||
func getTime() (Time, uint16, error) {
|
||||
t := timeNow()
|
||||
|
||||
// If we don't have a clock sequence already, set one.
|
||||
if clockSeq == 0 {
|
||||
setClockSequence(-1)
|
||||
}
|
||||
now := uint64(t.UnixNano()/100) + g1582ns100
|
||||
|
||||
// If time has gone backwards with this clock sequence then we
|
||||
// increment the clock sequence
|
||||
if now <= lasttime {
|
||||
clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000
|
||||
}
|
||||
lasttime = now
|
||||
return Time(now), clockSeq, nil
|
||||
}
|
||||
|
||||
// ClockSequence returns the current clock sequence, generating one if not
|
||||
// already set. The clock sequence is only used for Version 1 UUIDs.
|
||||
//
|
||||
// The uuid package does not use global static storage for the clock sequence or
|
||||
// the last time a UUID was generated. Unless SetClockSequence is used, a new
|
||||
// random clock sequence is generated the first time a clock sequence is
|
||||
// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1)
|
||||
func ClockSequence() int {
|
||||
defer timeMu.Unlock()
|
||||
timeMu.Lock()
|
||||
return clockSequence()
|
||||
}
|
||||
|
||||
func clockSequence() int {
|
||||
if clockSeq == 0 {
|
||||
setClockSequence(-1)
|
||||
}
|
||||
return int(clockSeq & 0x3fff)
|
||||
}
|
||||
|
||||
// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to
|
||||
// -1 causes a new sequence to be generated.
|
||||
func SetClockSequence(seq int) {
|
||||
defer timeMu.Unlock()
|
||||
timeMu.Lock()
|
||||
setClockSequence(seq)
|
||||
}
|
||||
|
||||
func setClockSequence(seq int) {
|
||||
if seq == -1 {
|
||||
var b [2]byte
|
||||
randomBits(b[:]) // clock sequence
|
||||
seq = int(b[0])<<8 | int(b[1])
|
||||
}
|
||||
oldSeq := clockSeq
|
||||
clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant
|
||||
if oldSeq != clockSeq {
|
||||
lasttime = 0
|
||||
}
|
||||
}
|
||||
|
||||
// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
|
||||
// uuid. The time is only defined for version 1 and 2 UUIDs.
|
||||
func (uuid UUID) Time() Time {
|
||||
time := int64(binary.BigEndian.Uint32(uuid[0:4]))
|
||||
time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
|
||||
time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
|
||||
return Time(time)
|
||||
}
|
||||
|
||||
// ClockSequence returns the clock sequence encoded in uuid.
|
||||
// The clock sequence is only well defined for version 1 and 2 UUIDs.
|
||||
func (uuid UUID) ClockSequence() int {
|
||||
return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff
|
||||
}
|
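The timestamp embedded in a Version 1 UUID can be recovered with the Time and UnixTime methods defined above; a short sketch:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/google/uuid"
)

func main() {
	id, err := uuid.NewUUID() // Version 1: carries a 60-bit timestamp
	if err != nil {
		log.Fatal(err)
	}
	sec, nsec := id.Time().UnixTime() // convert from the 1582 epoch to Unix time
	fmt.Println(time.Unix(sec, nsec).UTC())
}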
43
vendor/github.com/google/uuid/util.go
generated
vendored
Normal file
@ -0,0 +1,43 @@
|
||||
// Copyright 2016 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
// randomBits completely fills slice b with random data.
|
||||
func randomBits(b []byte) {
|
||||
if _, err := io.ReadFull(rander, b); err != nil {
|
||||
panic(err.Error()) // rand should never fail
|
||||
}
|
||||
}
|
||||
|
||||
// xvalues returns the value of a byte as a hexadecimal digit or 255.
|
||||
var xvalues = [256]byte{
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
|
||||
255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
}
|
||||
|
||||
// xtob converts hex characters x1 and x2 into a byte.
|
||||
func xtob(x1, x2 byte) (byte, bool) {
|
||||
b1 := xvalues[x1]
|
||||
b2 := xvalues[x2]
|
||||
return (b1 << 4) | b2, b1 != 255 && b2 != 255
|
||||
}
|
251
vendor/github.com/google/uuid/uuid.go
generated
vendored
Normal file
@ -0,0 +1,251 @@
|
||||
// Copyright 2018 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
|
||||
// 4122.
|
||||
type UUID [16]byte
|
||||
|
||||
// A Version represents a UUID's version.
|
||||
type Version byte
|
||||
|
||||
// A Variant represents a UUID's variant.
|
||||
type Variant byte
|
||||
|
||||
// Constants returned by Variant.
|
||||
const (
|
||||
Invalid = Variant(iota) // Invalid UUID
|
||||
RFC4122 // The variant specified in RFC4122
|
||||
Reserved // Reserved, NCS backward compatibility.
|
||||
Microsoft // Reserved, Microsoft Corporation backward compatibility.
|
||||
Future // Reserved for future definition.
|
||||
)
|
||||
|
||||
var rander = rand.Reader // random function
|
||||
|
||||
type invalidLengthError struct{ len int }
|
||||
|
||||
func (err invalidLengthError) Error() string {
|
||||
return fmt.Sprintf("invalid UUID length: %d", err.len)
|
||||
}
|
||||
|
||||
// Parse decodes s into a UUID or returns an error. Both the standard UUID
|
||||
// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
|
||||
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the
|
||||
// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex
|
||||
// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.
|
||||
func Parse(s string) (UUID, error) {
|
||||
var uuid UUID
|
||||
switch len(s) {
|
||||
// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
||||
case 36:
|
||||
|
||||
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
||||
case 36 + 9:
|
||||
if strings.ToLower(s[:9]) != "urn:uuid:" {
|
||||
return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9])
|
||||
}
|
||||
s = s[9:]
|
||||
|
||||
// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
|
||||
case 36 + 2:
|
||||
s = s[1:]
|
||||
|
||||
// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
|
||||
case 32:
|
||||
var ok bool
|
||||
for i := range uuid {
|
||||
uuid[i], ok = xtob(s[i*2], s[i*2+1])
|
||||
if !ok {
|
||||
return uuid, errors.New("invalid UUID format")
|
||||
}
|
||||
}
|
||||
return uuid, nil
|
||||
default:
|
||||
return uuid, invalidLengthError{len(s)}
|
||||
}
|
||||
// s is now at least 36 bytes long
|
||||
// it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
||||
if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
|
||||
return uuid, errors.New("invalid UUID format")
|
||||
}
|
||||
for i, x := range [16]int{
|
||||
0, 2, 4, 6,
|
||||
9, 11,
|
||||
14, 16,
|
||||
19, 21,
|
||||
24, 26, 28, 30, 32, 34} {
|
||||
v, ok := xtob(s[x], s[x+1])
|
||||
if !ok {
|
||||
return uuid, errors.New("invalid UUID format")
|
||||
}
|
||||
uuid[i] = v
|
||||
}
|
||||
return uuid, nil
|
||||
}
|
||||
|
||||
// ParseBytes is like Parse, except it parses a byte slice instead of a string.
|
||||
func ParseBytes(b []byte) (UUID, error) {
|
||||
var uuid UUID
|
||||
switch len(b) {
|
||||
case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
||||
case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
||||
if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) {
|
||||
return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9])
|
||||
}
|
||||
b = b[9:]
|
||||
case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
|
||||
b = b[1:]
|
||||
case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
|
||||
var ok bool
|
||||
for i := 0; i < 32; i += 2 {
|
||||
uuid[i/2], ok = xtob(b[i], b[i+1])
|
||||
if !ok {
|
||||
return uuid, errors.New("invalid UUID format")
|
||||
}
|
||||
}
|
||||
return uuid, nil
|
||||
default:
|
||||
return uuid, invalidLengthError{len(b)}
|
||||
}
|
||||
// s is now at least 36 bytes long
|
||||
// it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
||||
if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' {
|
||||
return uuid, errors.New("invalid UUID format")
|
||||
}
|
||||
for i, x := range [16]int{
|
||||
0, 2, 4, 6,
|
||||
9, 11,
|
||||
14, 16,
|
||||
19, 21,
|
||||
24, 26, 28, 30, 32, 34} {
|
||||
v, ok := xtob(b[x], b[x+1])
|
||||
if !ok {
|
||||
return uuid, errors.New("invalid UUID format")
|
||||
}
|
||||
uuid[i] = v
|
||||
}
|
||||
return uuid, nil
|
||||
}
|
||||
|
||||
// MustParse is like Parse but panics if the string cannot be parsed.
|
||||
// It simplifies safe initialization of global variables holding compiled UUIDs.
|
||||
func MustParse(s string) UUID {
|
||||
uuid, err := Parse(s)
|
||||
if err != nil {
|
||||
panic(`uuid: Parse(` + s + `): ` + err.Error())
|
||||
}
|
||||
return uuid
|
||||
}
|
||||
|
||||
// FromBytes creates a new UUID from a byte slice. Returns an error if the slice
|
||||
// does not have a length of 16. The bytes are copied from the slice.
|
||||
func FromBytes(b []byte) (uuid UUID, err error) {
|
||||
err = uuid.UnmarshalBinary(b)
|
||||
return uuid, err
|
||||
}
|
||||
|
||||
// Must returns uuid if err is nil and panics otherwise.
|
||||
func Must(uuid UUID, err error) UUID {
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return uuid
|
||||
}
|
||||
|
||||
// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
|
||||
// , or "" if uuid is invalid.
|
||||
func (uuid UUID) String() string {
|
||||
var buf [36]byte
|
||||
encodeHex(buf[:], uuid)
|
||||
return string(buf[:])
|
||||
}
|
||||
|
||||
// URN returns the RFC 2141 URN form of uuid,
|
||||
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
|
||||
func (uuid UUID) URN() string {
|
||||
var buf [36 + 9]byte
|
||||
copy(buf[:], "urn:uuid:")
|
||||
encodeHex(buf[9:], uuid)
|
||||
return string(buf[:])
|
||||
}
|
||||
|
||||
func encodeHex(dst []byte, uuid UUID) {
|
||||
hex.Encode(dst, uuid[:4])
|
||||
dst[8] = '-'
|
||||
hex.Encode(dst[9:13], uuid[4:6])
|
||||
dst[13] = '-'
|
||||
hex.Encode(dst[14:18], uuid[6:8])
|
||||
dst[18] = '-'
|
||||
hex.Encode(dst[19:23], uuid[8:10])
|
||||
dst[23] = '-'
|
||||
hex.Encode(dst[24:], uuid[10:])
|
||||
}
|
||||
|
||||
// Variant returns the variant encoded in uuid.
|
||||
func (uuid UUID) Variant() Variant {
|
||||
switch {
|
||||
case (uuid[8] & 0xc0) == 0x80:
|
||||
return RFC4122
|
||||
case (uuid[8] & 0xe0) == 0xc0:
|
||||
return Microsoft
|
||||
case (uuid[8] & 0xe0) == 0xe0:
|
||||
return Future
|
||||
default:
|
||||
return Reserved
|
||||
}
|
||||
}
|
||||
|
||||
// Version returns the version of uuid.
|
||||
func (uuid UUID) Version() Version {
|
||||
return Version(uuid[6] >> 4)
|
||||
}
|
||||
|
||||
func (v Version) String() string {
|
||||
if v > 15 {
|
||||
return fmt.Sprintf("BAD_VERSION_%d", v)
|
||||
}
|
||||
return fmt.Sprintf("VERSION_%d", v)
|
||||
}
|
||||
|
||||
func (v Variant) String() string {
|
||||
switch v {
|
||||
case RFC4122:
|
||||
return "RFC4122"
|
||||
case Reserved:
|
||||
return "Reserved"
|
||||
case Microsoft:
|
||||
return "Microsoft"
|
||||
case Future:
|
||||
return "Future"
|
||||
case Invalid:
|
||||
return "Invalid"
|
||||
}
|
||||
return fmt.Sprintf("BadVariant%d", int(v))
|
||||
}
|
||||
|
||||
// SetRand sets the random number generator to r, which implements io.Reader.
|
||||
// If r.Read returns an error when the package requests random data then
|
||||
// a panic will be issued.
|
||||
//
|
||||
// Calling SetRand with nil sets the random number generator to the default
|
||||
// generator.
|
||||
func SetRand(r io.Reader) {
|
||||
if r == nil {
|
||||
rander = rand.Reader
|
||||
return
|
||||
}
|
||||
rander = r
|
||||
}
|
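Parse accepts the four encodings listed in its comment above; all of them decode to the same value, as this sketch shows (the example UUID is the DNS namespace constant).

package main

import (
	"fmt"
	"log"

	"github.com/google/uuid"
)

func main() {
	forms := []string{
		"6ba7b810-9dad-11d1-80b4-00c04fd430c8",          // standard
		"urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8", // URN
		"{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",        // Microsoft braces
		"6ba7b8109dad11d180b400c04fd430c8",              // raw hex
	}
	for _, s := range forms {
		id, err := uuid.Parse(s)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(id, id.Version(), id.Variant()) // identical value each time
	}
}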
44
vendor/github.com/google/uuid/version1.go
generated
vendored
Normal file
@ -0,0 +1,44 @@
|
||||
// Copyright 2016 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
)
|
||||
|
||||
// NewUUID returns a Version 1 UUID based on the current NodeID and clock
|
||||
// sequence, and the current time. If the NodeID has not been set by SetNodeID
|
||||
// or SetNodeInterface then it will be set automatically. If the NodeID cannot
|
||||
// be set NewUUID returns nil. If clock sequence has not been set by
|
||||
// SetClockSequence then it will be set automatically. If GetTime fails to
|
||||
// return the current time, NewUUID returns nil and an error.
|
||||
//
|
||||
// In most cases, New should be used.
|
||||
func NewUUID() (UUID, error) {
|
||||
var uuid UUID
|
||||
now, seq, err := GetTime()
|
||||
if err != nil {
|
||||
return uuid, err
|
||||
}
|
||||
|
||||
timeLow := uint32(now & 0xffffffff)
|
||||
timeMid := uint16((now >> 32) & 0xffff)
|
||||
timeHi := uint16((now >> 48) & 0x0fff)
|
||||
timeHi |= 0x1000 // Version 1
|
||||
|
||||
binary.BigEndian.PutUint32(uuid[0:], timeLow)
|
||||
binary.BigEndian.PutUint16(uuid[4:], timeMid)
|
||||
binary.BigEndian.PutUint16(uuid[6:], timeHi)
|
||||
binary.BigEndian.PutUint16(uuid[8:], seq)
|
||||
|
||||
nodeMu.Lock()
|
||||
if nodeID == zeroID {
|
||||
setNodeInterface("")
|
||||
}
|
||||
copy(uuid[10:], nodeID[:])
|
||||
nodeMu.Unlock()
|
||||
|
||||
return uuid, nil
|
||||
}
|
51
vendor/github.com/google/uuid/version4.go
generated
vendored
Normal file
@ -0,0 +1,51 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package uuid

import "io"

// New creates a new random UUID or panics. New is equivalent to
// the expression
//
//	uuid.Must(uuid.NewRandom())
func New() UUID {
	return Must(NewRandom())
}

// NewString creates a new random UUID and returns it as a string or panics.
// NewString is equivalent to the expression
//
//	uuid.New().String()
func NewString() string {
	return Must(NewRandom()).String()
}

// NewRandom returns a Random (Version 4) UUID.
//
// The strength of the UUIDs is based on the strength of the crypto/rand
// package.
//
// A note about uniqueness derived from the UUID Wikipedia entry:
//
//	Randomly generated UUIDs have 122 random bits. One's annual risk of being
//	hit by a meteorite is estimated to be one chance in 17 billion, that
//	means the probability is about 0.00000000006 (6 × 10⁻¹¹),
//	equivalent to the odds of creating a few tens of trillions of UUIDs in a
//	year and having one duplicate.
func NewRandom() (UUID, error) {
	return NewRandomFromReader(rander)
}

// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader.
func NewRandomFromReader(r io.Reader) (UUID, error) {
	var uuid UUID
	_, err := io.ReadFull(r, uuid[:])
	if err != nil {
		return Nil, err
	}
	uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
	uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
	return uuid, nil
}
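A minimal, non-vendored sketch of the random-UUID entry points above; NewRandomFromReader is fed crypto/rand explicitly only to make the entropy source visible:

package main

import (
	"crypto/rand"
	"fmt"

	"github.com/google/uuid"
)

func main() {
	fmt.Println(uuid.New())       // panics only if the random source fails
	fmt.Println(uuid.NewString()) // same, but directly as a string

	// Equivalent to NewRandom, with the entropy source made explicit.
	u, err := uuid.NewRandomFromReader(rand.Reader)
	if err != nil {
		fmt.Println("entropy source failed:", err)
		return
	}
	fmt.Println(u, u.Version(), u.Variant())
}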
25
vendor/github.com/gorilla/websocket/.gitignore
generated
vendored
Normal file
25
vendor/github.com/gorilla/websocket/.gitignore
generated
vendored
Normal file
@ -0,0 +1,25 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe

.idea/
*.iml
9
vendor/github.com/gorilla/websocket/AUTHORS
generated
vendored
Normal file
9
vendor/github.com/gorilla/websocket/AUTHORS
generated
vendored
Normal file
@ -0,0 +1,9 @@
# This is the official list of Gorilla WebSocket authors for copyright
# purposes.
#
# Please keep the list sorted.

Gary Burd <gary@beagledreams.com>
Google LLC (https://opensource.google.com/)
Joachim Bauch <mail@joachim-bauch.de>
22
vendor/github.com/gorilla/websocket/LICENSE
generated
vendored
Normal file
22
vendor/github.com/gorilla/websocket/LICENSE
generated
vendored
Normal file
@ -0,0 +1,22 @@
Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.

Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64
vendor/github.com/gorilla/websocket/README.md
generated
vendored
Normal file
64
vendor/github.com/gorilla/websocket/README.md
generated
vendored
Normal file
@ -0,0 +1,64 @@
# Gorilla WebSocket

[GoDoc](https://godoc.org/github.com/gorilla/websocket)
[CircleCI](https://circleci.com/gh/gorilla/websocket)

Gorilla WebSocket is a [Go](http://golang.org/) implementation of the
[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol.

### Documentation

* [API Reference](https://pkg.go.dev/github.com/gorilla/websocket?tab=doc)
* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat)
* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command)
* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo)
* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch)

### Status

The Gorilla WebSocket package provides a complete and tested implementation of
the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The
package API is stable.

### Installation

    go get github.com/gorilla/websocket

### Protocol Compliance

The Gorilla WebSocket package passes the server tests in the [Autobahn Test
Suite](https://github.com/crossbario/autobahn-testsuite) using the application in the [examples/autobahn
subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn).

### Gorilla WebSocket compared with other packages

<table>
<tr>
<th></th>
<th><a href="http://godoc.org/github.com/gorilla/websocket">github.com/gorilla</a></th>
<th><a href="http://godoc.org/golang.org/x/net/websocket">golang.org/x/net</a></th>
</tr>
<tr><td colspan="3"><a href="http://tools.ietf.org/html/rfc6455">RFC 6455</a> Features</td></tr>
<tr><td>Passes <a href="https://github.com/crossbario/autobahn-testsuite">Autobahn Test Suite</a></td><td><a href="https://github.com/gorilla/websocket/tree/master/examples/autobahn">Yes</a></td><td>No</td></tr>
<tr><td>Receive <a href="https://tools.ietf.org/html/rfc6455#section-5.4">fragmented</a> message</td><td>Yes</td><td><a href="https://code.google.com/p/go/issues/detail?id=7632">No</a>, see note 1</td></tr>
<tr><td>Send <a href="https://tools.ietf.org/html/rfc6455#section-5.5.1">close</a> message</td><td><a href="http://godoc.org/github.com/gorilla/websocket#hdr-Control_Messages">Yes</a></td><td><a href="https://code.google.com/p/go/issues/detail?id=4588">No</a></td></tr>
<tr><td>Send <a href="https://tools.ietf.org/html/rfc6455#section-5.5.2">pings</a> and receive <a href="https://tools.ietf.org/html/rfc6455#section-5.5.3">pongs</a></td><td><a href="http://godoc.org/github.com/gorilla/websocket#hdr-Control_Messages">Yes</a></td><td>No</td></tr>
<tr><td>Get the <a href="https://tools.ietf.org/html/rfc6455#section-5.6">type</a> of a received data message</td><td>Yes</td><td>Yes, see note 2</td></tr>
<tr><td colspan="3">Other Features</td></tr>
<tr><td><a href="https://tools.ietf.org/html/rfc7692">Compression Extensions</a></td><td>Experimental</td><td>No</td></tr>
<tr><td>Read message using io.Reader</td><td><a href="http://godoc.org/github.com/gorilla/websocket#Conn.NextReader">Yes</a></td><td>No, see note 3</td></tr>
<tr><td>Write message using io.WriteCloser</td><td><a href="http://godoc.org/github.com/gorilla/websocket#Conn.NextWriter">Yes</a></td><td>No, see note 3</td></tr>
</table>

Notes:

1. Large messages are fragmented in [Chrome's new WebSocket implementation](http://www.ietf.org/mail-archive/web/hybi/current/msg10503.html).
2. The application can get the type of a received data message by implementing
   a [Codec marshal](http://godoc.org/golang.org/x/net/websocket#Codec.Marshal)
   function.
3. The go.net io.Reader and io.Writer operate across WebSocket frame boundaries.
   Read returns when the input buffer is full or a frame boundary is
   encountered. Each call to Write sends a single frame message. The Gorilla
   io.Reader and io.WriteCloser operate on a single WebSocket message.
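To complement the examples linked above, a brief, non-vendored sketch of dialing a server with this package; ws://localhost:8080/echo is a placeholder endpoint:

package main

import (
	"log"

	"github.com/gorilla/websocket"
)

func main() {
	// DefaultDialer uses a 45s handshake timeout and proxy-from-environment.
	conn, _, err := websocket.DefaultDialer.Dial("ws://localhost:8080/echo", nil)
	if err != nil {
		log.Fatal("dial:", err)
	}
	defer conn.Close()

	if err := conn.WriteMessage(websocket.TextMessage, []byte("hello")); err != nil {
		log.Fatal("write:", err)
	}
	_, msg, err := conn.ReadMessage()
	if err != nil {
		log.Fatal("read:", err)
	}
	log.Printf("received: %s", msg)
}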
395
vendor/github.com/gorilla/websocket/client.go
generated
vendored
Normal file
395
vendor/github.com/gorilla/websocket/client.go
generated
vendored
Normal file
@ -0,0 +1,395 @@
|
||||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptrace"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ErrBadHandshake is returned when the server response to opening handshake is
|
||||
// invalid.
|
||||
var ErrBadHandshake = errors.New("websocket: bad handshake")
|
||||
|
||||
var errInvalidCompression = errors.New("websocket: invalid compression negotiation")
|
||||
|
||||
// NewClient creates a new client connection using the given net connection.
|
||||
// The URL u specifies the host and request URI. Use requestHeader to specify
|
||||
// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies
|
||||
// (Cookie). Use the response.Header to get the selected subprotocol
|
||||
// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
|
||||
//
|
||||
// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
|
||||
// non-nil *http.Response so that callers can handle redirects, authentication,
|
||||
// etc.
|
||||
//
|
||||
// Deprecated: Use Dialer instead.
|
||||
func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) {
|
||||
d := Dialer{
|
||||
ReadBufferSize: readBufSize,
|
||||
WriteBufferSize: writeBufSize,
|
||||
NetDial: func(net, addr string) (net.Conn, error) {
|
||||
return netConn, nil
|
||||
},
|
||||
}
|
||||
return d.Dial(u.String(), requestHeader)
|
||||
}
|
||||
|
||||
// A Dialer contains options for connecting to WebSocket server.
|
||||
type Dialer struct {
|
||||
// NetDial specifies the dial function for creating TCP connections. If
|
||||
// NetDial is nil, net.Dial is used.
|
||||
NetDial func(network, addr string) (net.Conn, error)
|
||||
|
||||
// NetDialContext specifies the dial function for creating TCP connections. If
|
||||
// NetDialContext is nil, net.DialContext is used.
|
||||
NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error)
|
||||
|
||||
// Proxy specifies a function to return a proxy for a given
|
||||
// Request. If the function returns a non-nil error, the
|
||||
// request is aborted with the provided error.
|
||||
// If Proxy is nil or returns a nil *URL, no proxy is used.
|
||||
Proxy func(*http.Request) (*url.URL, error)
|
||||
|
||||
// TLSClientConfig specifies the TLS configuration to use with tls.Client.
|
||||
// If nil, the default configuration is used.
|
||||
TLSClientConfig *tls.Config
|
||||
|
||||
// HandshakeTimeout specifies the duration for the handshake to complete.
|
||||
HandshakeTimeout time.Duration
|
||||
|
||||
// ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer
|
||||
// size is zero, then a useful default size is used. The I/O buffer sizes
|
||||
// do not limit the size of the messages that can be sent or received.
|
||||
ReadBufferSize, WriteBufferSize int
|
||||
|
||||
// WriteBufferPool is a pool of buffers for write operations. If the value
|
||||
// is not set, then write buffers are allocated to the connection for the
|
||||
// lifetime of the connection.
|
||||
//
|
||||
// A pool is most useful when the application has a modest volume of writes
|
||||
// across a large number of connections.
|
||||
//
|
||||
// Applications should use a single pool for each unique value of
|
||||
// WriteBufferSize.
|
||||
WriteBufferPool BufferPool
|
||||
|
||||
// Subprotocols specifies the client's requested subprotocols.
|
||||
Subprotocols []string
|
||||
|
||||
// EnableCompression specifies if the client should attempt to negotiate
|
||||
// per message compression (RFC 7692). Setting this value to true does not
|
||||
// guarantee that compression will be supported. Currently only "no context
|
||||
// takeover" modes are supported.
|
||||
EnableCompression bool
|
||||
|
||||
// Jar specifies the cookie jar.
|
||||
// If Jar is nil, cookies are not sent in requests and ignored
|
||||
// in responses.
|
||||
Jar http.CookieJar
|
||||
}
|
||||
|
||||
// Dial creates a new client connection by calling DialContext with a background context.
|
||||
func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
|
||||
return d.DialContext(context.Background(), urlStr, requestHeader)
|
||||
}
|
||||
|
||||
var errMalformedURL = errors.New("malformed ws or wss URL")
|
||||
|
||||
func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) {
|
||||
hostPort = u.Host
|
||||
hostNoPort = u.Host
|
||||
if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") {
|
||||
hostNoPort = hostNoPort[:i]
|
||||
} else {
|
||||
switch u.Scheme {
|
||||
case "wss":
|
||||
hostPort += ":443"
|
||||
case "https":
|
||||
hostPort += ":443"
|
||||
default:
|
||||
hostPort += ":80"
|
||||
}
|
||||
}
|
||||
return hostPort, hostNoPort
|
||||
}
|
||||
|
||||
// DefaultDialer is a dialer with all fields set to the default values.
|
||||
var DefaultDialer = &Dialer{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
HandshakeTimeout: 45 * time.Second,
|
||||
}
|
||||
|
||||
// nilDialer is dialer to use when receiver is nil.
|
||||
var nilDialer = *DefaultDialer
|
||||
|
||||
// DialContext creates a new client connection. Use requestHeader to specify the
|
||||
// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie).
|
||||
// Use the response.Header to get the selected subprotocol
|
||||
// (Sec-WebSocket-Protocol) and cookies (Set-Cookie).
|
||||
//
|
||||
// The context will be used in the request and in the Dialer.
|
||||
//
|
||||
// If the WebSocket handshake fails, ErrBadHandshake is returned along with a
|
||||
// non-nil *http.Response so that callers can handle redirects, authentication,
|
||||
// etcetera. The response body may not contain the entire response and does not
|
||||
// need to be closed by the application.
|
||||
func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
|
||||
if d == nil {
|
||||
d = &nilDialer
|
||||
}
|
||||
|
||||
challengeKey, err := generateChallengeKey()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
u, err := url.Parse(urlStr)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
switch u.Scheme {
|
||||
case "ws":
|
||||
u.Scheme = "http"
|
||||
case "wss":
|
||||
u.Scheme = "https"
|
||||
default:
|
||||
return nil, nil, errMalformedURL
|
||||
}
|
||||
|
||||
if u.User != nil {
|
||||
// User name and password are not allowed in websocket URIs.
|
||||
return nil, nil, errMalformedURL
|
||||
}
|
||||
|
||||
req := &http.Request{
|
||||
Method: "GET",
|
||||
URL: u,
|
||||
Proto: "HTTP/1.1",
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
Header: make(http.Header),
|
||||
Host: u.Host,
|
||||
}
|
||||
req = req.WithContext(ctx)
|
||||
|
||||
// Set the cookies present in the cookie jar of the dialer
|
||||
if d.Jar != nil {
|
||||
for _, cookie := range d.Jar.Cookies(u) {
|
||||
req.AddCookie(cookie)
|
||||
}
|
||||
}
|
||||
|
||||
// Set the request headers using the capitalization for names and values in
|
||||
// RFC examples. Although the capitalization shouldn't matter, there are
|
||||
// servers that depend on it. The Header.Set method is not used because the
|
||||
// method canonicalizes the header names.
|
||||
req.Header["Upgrade"] = []string{"websocket"}
|
||||
req.Header["Connection"] = []string{"Upgrade"}
|
||||
req.Header["Sec-WebSocket-Key"] = []string{challengeKey}
|
||||
req.Header["Sec-WebSocket-Version"] = []string{"13"}
|
||||
if len(d.Subprotocols) > 0 {
|
||||
req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")}
|
||||
}
|
||||
for k, vs := range requestHeader {
|
||||
switch {
|
||||
case k == "Host":
|
||||
if len(vs) > 0 {
|
||||
req.Host = vs[0]
|
||||
}
|
||||
case k == "Upgrade" ||
|
||||
k == "Connection" ||
|
||||
k == "Sec-Websocket-Key" ||
|
||||
k == "Sec-Websocket-Version" ||
|
||||
k == "Sec-Websocket-Extensions" ||
|
||||
(k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0):
|
||||
return nil, nil, errors.New("websocket: duplicate header not allowed: " + k)
|
||||
case k == "Sec-Websocket-Protocol":
|
||||
req.Header["Sec-WebSocket-Protocol"] = vs
|
||||
default:
|
||||
req.Header[k] = vs
|
||||
}
|
||||
}
|
||||
|
||||
if d.EnableCompression {
|
||||
req.Header["Sec-WebSocket-Extensions"] = []string{"permessage-deflate; server_no_context_takeover; client_no_context_takeover"}
|
||||
}
|
||||
|
||||
if d.HandshakeTimeout != 0 {
|
||||
var cancel func()
|
||||
ctx, cancel = context.WithTimeout(ctx, d.HandshakeTimeout)
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
// Get network dial function.
|
||||
var netDial func(network, add string) (net.Conn, error)
|
||||
|
||||
if d.NetDialContext != nil {
|
||||
netDial = func(network, addr string) (net.Conn, error) {
|
||||
return d.NetDialContext(ctx, network, addr)
|
||||
}
|
||||
} else if d.NetDial != nil {
|
||||
netDial = d.NetDial
|
||||
} else {
|
||||
netDialer := &net.Dialer{}
|
||||
netDial = func(network, addr string) (net.Conn, error) {
|
||||
return netDialer.DialContext(ctx, network, addr)
|
||||
}
|
||||
}
|
||||
|
||||
// If needed, wrap the dial function to set the connection deadline.
|
||||
if deadline, ok := ctx.Deadline(); ok {
|
||||
forwardDial := netDial
|
||||
netDial = func(network, addr string) (net.Conn, error) {
|
||||
c, err := forwardDial(network, addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = c.SetDeadline(deadline)
|
||||
if err != nil {
|
||||
c.Close()
|
||||
return nil, err
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
}
|
||||
|
||||
// If needed, wrap the dial function to connect through a proxy.
|
||||
if d.Proxy != nil {
|
||||
proxyURL, err := d.Proxy(req)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if proxyURL != nil {
|
||||
dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
netDial = dialer.Dial
|
||||
}
|
||||
}
|
||||
|
||||
hostPort, hostNoPort := hostPortNoPort(u)
|
||||
trace := httptrace.ContextClientTrace(ctx)
|
||||
if trace != nil && trace.GetConn != nil {
|
||||
trace.GetConn(hostPort)
|
||||
}
|
||||
|
||||
netConn, err := netDial("tcp", hostPort)
|
||||
if trace != nil && trace.GotConn != nil {
|
||||
trace.GotConn(httptrace.GotConnInfo{
|
||||
Conn: netConn,
|
||||
})
|
||||
}
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if netConn != nil {
|
||||
netConn.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
if u.Scheme == "https" {
|
||||
cfg := cloneTLSConfig(d.TLSClientConfig)
|
||||
if cfg.ServerName == "" {
|
||||
cfg.ServerName = hostNoPort
|
||||
}
|
||||
tlsConn := tls.Client(netConn, cfg)
|
||||
netConn = tlsConn
|
||||
|
||||
var err error
|
||||
if trace != nil {
|
||||
err = doHandshakeWithTrace(trace, tlsConn, cfg)
|
||||
} else {
|
||||
err = doHandshake(tlsConn, cfg)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize, d.WriteBufferPool, nil, nil)
|
||||
|
||||
if err := req.Write(netConn); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if trace != nil && trace.GotFirstResponseByte != nil {
|
||||
if peek, err := conn.br.Peek(1); err == nil && len(peek) == 1 {
|
||||
trace.GotFirstResponseByte()
|
||||
}
|
||||
}
|
||||
|
||||
resp, err := http.ReadResponse(conn.br, req)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if d.Jar != nil {
|
||||
if rc := resp.Cookies(); len(rc) > 0 {
|
||||
d.Jar.SetCookies(u, rc)
|
||||
}
|
||||
}
|
||||
|
||||
if resp.StatusCode != 101 ||
|
||||
!strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") ||
|
||||
!strings.EqualFold(resp.Header.Get("Connection"), "upgrade") ||
|
||||
resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) {
|
||||
// Before closing the network connection on return from this
|
||||
// function, slurp up some of the response to aid application
|
||||
// debugging.
|
||||
buf := make([]byte, 1024)
|
||||
n, _ := io.ReadFull(resp.Body, buf)
|
||||
resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n]))
|
||||
return nil, resp, ErrBadHandshake
|
||||
}
|
||||
|
||||
for _, ext := range parseExtensions(resp.Header) {
|
||||
if ext[""] != "permessage-deflate" {
|
||||
continue
|
||||
}
|
||||
_, snct := ext["server_no_context_takeover"]
|
||||
_, cnct := ext["client_no_context_takeover"]
|
||||
if !snct || !cnct {
|
||||
return nil, resp, errInvalidCompression
|
||||
}
|
||||
conn.newCompressionWriter = compressNoContextTakeover
|
||||
conn.newDecompressionReader = decompressNoContextTakeover
|
||||
break
|
||||
}
|
||||
|
||||
resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{}))
|
||||
conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol")
|
||||
|
||||
netConn.SetDeadline(time.Time{})
|
||||
netConn = nil // to avoid close in defer.
|
||||
return conn, resp, nil
|
||||
}
|
||||
|
||||
func doHandshake(tlsConn *tls.Conn, cfg *tls.Config) error {
|
||||
if err := tlsConn.Handshake(); err != nil {
|
||||
return err
|
||||
}
|
||||
if !cfg.InsecureSkipVerify {
|
||||
if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
16
vendor/github.com/gorilla/websocket/client_clone.go
generated
vendored
Normal file
16
vendor/github.com/gorilla/websocket/client_clone.go
generated
vendored
Normal file
@ -0,0 +1,16 @@
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.8

package websocket

import "crypto/tls"

func cloneTLSConfig(cfg *tls.Config) *tls.Config {
	if cfg == nil {
		return &tls.Config{}
	}
	return cfg.Clone()
}
38
vendor/github.com/gorilla/websocket/client_clone_legacy.go
generated
vendored
Normal file
38
vendor/github.com/gorilla/websocket/client_clone_legacy.go
generated
vendored
Normal file
@ -0,0 +1,38 @@
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !go1.8

package websocket

import "crypto/tls"

// cloneTLSConfig clones all public fields except the fields
// SessionTicketsDisabled and SessionTicketKey. This avoids copying the
// sync.Mutex in the sync.Once and makes it safe to call cloneTLSConfig on a
// config in active use.
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
	if cfg == nil {
		return &tls.Config{}
	}
	return &tls.Config{
		Rand:                     cfg.Rand,
		Time:                     cfg.Time,
		Certificates:             cfg.Certificates,
		NameToCertificate:        cfg.NameToCertificate,
		GetCertificate:           cfg.GetCertificate,
		RootCAs:                  cfg.RootCAs,
		NextProtos:               cfg.NextProtos,
		ServerName:               cfg.ServerName,
		ClientAuth:               cfg.ClientAuth,
		ClientCAs:                cfg.ClientCAs,
		InsecureSkipVerify:       cfg.InsecureSkipVerify,
		CipherSuites:             cfg.CipherSuites,
		PreferServerCipherSuites: cfg.PreferServerCipherSuites,
		ClientSessionCache:       cfg.ClientSessionCache,
		MinVersion:               cfg.MinVersion,
		MaxVersion:               cfg.MaxVersion,
		CurvePreferences:         cfg.CurvePreferences,
	}
}
148
vendor/github.com/gorilla/websocket/compression.go
generated
vendored
Normal file
148
vendor/github.com/gorilla/websocket/compression.go
generated
vendored
Normal file
@ -0,0 +1,148 @@
// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package websocket

import (
	"compress/flate"
	"errors"
	"io"
	"strings"
	"sync"
)

const (
	minCompressionLevel     = -2 // flate.HuffmanOnly not defined in Go < 1.6
	maxCompressionLevel     = flate.BestCompression
	defaultCompressionLevel = 1
)

var (
	flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool
	flateReaderPool  = sync.Pool{New: func() interface{} {
		return flate.NewReader(nil)
	}}
)

func decompressNoContextTakeover(r io.Reader) io.ReadCloser {
	const tail =
	// Add four bytes as specified in RFC
	"\x00\x00\xff\xff" +
		// Add final block to squelch unexpected EOF error from flate reader.
		"\x01\x00\x00\xff\xff"

	fr, _ := flateReaderPool.Get().(io.ReadCloser)
	fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil)
	return &flateReadWrapper{fr}
}

func isValidCompressionLevel(level int) bool {
	return minCompressionLevel <= level && level <= maxCompressionLevel
}

func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser {
	p := &flateWriterPools[level-minCompressionLevel]
	tw := &truncWriter{w: w}
	fw, _ := p.Get().(*flate.Writer)
	if fw == nil {
		fw, _ = flate.NewWriter(tw, level)
	} else {
		fw.Reset(tw)
	}
	return &flateWriteWrapper{fw: fw, tw: tw, p: p}
}

// truncWriter is an io.Writer that writes all but the last four bytes of the
// stream to another io.Writer.
type truncWriter struct {
	w io.WriteCloser
	n int
	p [4]byte
}

func (w *truncWriter) Write(p []byte) (int, error) {
	n := 0

	// fill buffer first for simplicity.
	if w.n < len(w.p) {
		n = copy(w.p[w.n:], p)
		p = p[n:]
		w.n += n
		if len(p) == 0 {
			return n, nil
		}
	}

	m := len(p)
	if m > len(w.p) {
		m = len(w.p)
	}

	if nn, err := w.w.Write(w.p[:m]); err != nil {
		return n + nn, err
	}

	copy(w.p[:], w.p[m:])
	copy(w.p[len(w.p)-m:], p[len(p)-m:])
	nn, err := w.w.Write(p[:len(p)-m])
	return n + nn, err
}

type flateWriteWrapper struct {
	fw *flate.Writer
	tw *truncWriter
	p  *sync.Pool
}

func (w *flateWriteWrapper) Write(p []byte) (int, error) {
	if w.fw == nil {
		return 0, errWriteClosed
	}
	return w.fw.Write(p)
}

func (w *flateWriteWrapper) Close() error {
	if w.fw == nil {
		return errWriteClosed
	}
	err1 := w.fw.Flush()
	w.p.Put(w.fw)
	w.fw = nil
	if w.tw.p != [4]byte{0, 0, 0xff, 0xff} {
		return errors.New("websocket: internal error, unexpected bytes at end of flate stream")
	}
	err2 := w.tw.w.Close()
	if err1 != nil {
		return err1
	}
	return err2
}

type flateReadWrapper struct {
	fr io.ReadCloser
}

func (r *flateReadWrapper) Read(p []byte) (int, error) {
	if r.fr == nil {
		return 0, io.ErrClosedPipe
	}
	n, err := r.fr.Read(p)
	if err == io.EOF {
		// Preemptively place the reader back in the pool. This helps with
		// scenarios where the application does not call NextReader() soon after
		// this final read.
		r.Close()
	}
	return n, err
}

func (r *flateReadWrapper) Close() error {
	if r.fr == nil {
		return io.ErrClosedPipe
	}
	err := r.fr.Close()
	flateReaderPool.Put(r.fr)
	r.fr = nil
	return err
}
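A short, non-vendored sketch of turning on the per-message-deflate support that compression.go implements; compression is negotiated, so the peer may still decline it:

package main

import (
	"log"

	"github.com/gorilla/websocket"
)

func dialCompressed(url string) (*websocket.Conn, error) {
	d := websocket.Dialer{
		EnableCompression: true, // ask for permessage-deflate, no context takeover
	}
	conn, _, err := d.Dial(url, nil)
	if err != nil {
		return nil, err
	}
	// Optionally trade CPU for bandwidth on outgoing messages.
	if err := conn.SetCompressionLevel(6); err != nil {
		log.Println("compression level:", err)
	}
	return conn, nil
}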
1201
vendor/github.com/gorilla/websocket/conn.go
generated
vendored
Normal file
1201
vendor/github.com/gorilla/websocket/conn.go
generated
vendored
Normal file
File diff suppressed because it is too large
15
vendor/github.com/gorilla/websocket/conn_write.go
generated
vendored
Normal file
15
vendor/github.com/gorilla/websocket/conn_write.go
generated
vendored
Normal file
@ -0,0 +1,15 @@
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.8

package websocket

import "net"

func (c *Conn) writeBufs(bufs ...[]byte) error {
	b := net.Buffers(bufs)
	_, err := b.WriteTo(c.conn)
	return err
}
18
vendor/github.com/gorilla/websocket/conn_write_legacy.go
generated
vendored
Normal file
18
vendor/github.com/gorilla/websocket/conn_write_legacy.go
generated
vendored
Normal file
@ -0,0 +1,18 @@
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !go1.8

package websocket

func (c *Conn) writeBufs(bufs ...[]byte) error {
	for _, buf := range bufs {
		if len(buf) > 0 {
			if _, err := c.conn.Write(buf); err != nil {
				return err
			}
		}
	}
	return nil
}
227
vendor/github.com/gorilla/websocket/doc.go
generated
vendored
Normal file
227
vendor/github.com/gorilla/websocket/doc.go
generated
vendored
Normal file
@ -0,0 +1,227 @@
|
||||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package websocket implements the WebSocket protocol defined in RFC 6455.
|
||||
//
|
||||
// Overview
|
||||
//
|
||||
// The Conn type represents a WebSocket connection. A server application calls
|
||||
// the Upgrader.Upgrade method from an HTTP request handler to get a *Conn:
|
||||
//
|
||||
// var upgrader = websocket.Upgrader{
|
||||
// ReadBufferSize: 1024,
|
||||
// WriteBufferSize: 1024,
|
||||
// }
|
||||
//
|
||||
// func handler(w http.ResponseWriter, r *http.Request) {
|
||||
// conn, err := upgrader.Upgrade(w, r, nil)
|
||||
// if err != nil {
|
||||
// log.Println(err)
|
||||
// return
|
||||
// }
|
||||
// ... Use conn to send and receive messages.
|
||||
// }
|
||||
//
|
||||
// Call the connection's WriteMessage and ReadMessage methods to send and
|
||||
// receive messages as a slice of bytes. This snippet of code shows how to echo
|
||||
// messages using these methods:
|
||||
//
|
||||
// for {
|
||||
// messageType, p, err := conn.ReadMessage()
|
||||
// if err != nil {
|
||||
// log.Println(err)
|
||||
// return
|
||||
// }
|
||||
// if err := conn.WriteMessage(messageType, p); err != nil {
|
||||
// log.Println(err)
|
||||
// return
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// In above snippet of code, p is a []byte and messageType is an int with value
|
||||
// websocket.BinaryMessage or websocket.TextMessage.
|
||||
//
|
||||
// An application can also send and receive messages using the io.WriteCloser
|
||||
// and io.Reader interfaces. To send a message, call the connection NextWriter
|
||||
// method to get an io.WriteCloser, write the message to the writer and close
|
||||
// the writer when done. To receive a message, call the connection NextReader
|
||||
// method to get an io.Reader and read until io.EOF is returned. This snippet
|
||||
// shows how to echo messages using the NextWriter and NextReader methods:
|
||||
//
|
||||
// for {
|
||||
// messageType, r, err := conn.NextReader()
|
||||
// if err != nil {
|
||||
// return
|
||||
// }
|
||||
// w, err := conn.NextWriter(messageType)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// if _, err := io.Copy(w, r); err != nil {
|
||||
// return err
|
||||
// }
|
||||
// if err := w.Close(); err != nil {
|
||||
// return err
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// Data Messages
|
||||
//
|
||||
// The WebSocket protocol distinguishes between text and binary data messages.
|
||||
// Text messages are interpreted as UTF-8 encoded text. The interpretation of
|
||||
// binary messages is left to the application.
|
||||
//
|
||||
// This package uses the TextMessage and BinaryMessage integer constants to
|
||||
// identify the two data message types. The ReadMessage and NextReader methods
|
||||
// return the type of the received message. The messageType argument to the
|
||||
// WriteMessage and NextWriter methods specifies the type of a sent message.
|
||||
//
|
||||
// It is the application's responsibility to ensure that text messages are
|
||||
// valid UTF-8 encoded text.
|
||||
//
|
||||
// Control Messages
|
||||
//
|
||||
// The WebSocket protocol defines three types of control messages: close, ping
|
||||
// and pong. Call the connection WriteControl, WriteMessage or NextWriter
|
||||
// methods to send a control message to the peer.
|
||||
//
|
||||
// Connections handle received close messages by calling the handler function
|
||||
// set with the SetCloseHandler method and by returning a *CloseError from the
|
||||
// NextReader, ReadMessage or the message Read method. The default close
|
||||
// handler sends a close message to the peer.
|
||||
//
|
||||
// Connections handle received ping messages by calling the handler function
|
||||
// set with the SetPingHandler method. The default ping handler sends a pong
|
||||
// message to the peer.
|
||||
//
|
||||
// Connections handle received pong messages by calling the handler function
|
||||
// set with the SetPongHandler method. The default pong handler does nothing.
|
||||
// If an application sends ping messages, then the application should set a
|
||||
// pong handler to receive the corresponding pong.
|
||||
//
|
||||
// The control message handler functions are called from the NextReader,
|
||||
// ReadMessage and message reader Read methods. The default close and ping
|
||||
// handlers can block these methods for a short time when the handler writes to
|
||||
// the connection.
|
||||
//
|
||||
// The application must read the connection to process close, ping and pong
|
||||
// messages sent from the peer. If the application is not otherwise interested
|
||||
// in messages from the peer, then the application should start a goroutine to
|
||||
// read and discard messages from the peer. A simple example is:
|
||||
//
|
||||
// func readLoop(c *websocket.Conn) {
|
||||
// for {
|
||||
// if _, _, err := c.NextReader(); err != nil {
|
||||
// c.Close()
|
||||
// break
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// Concurrency
|
||||
//
|
||||
// Connections support one concurrent reader and one concurrent writer.
|
||||
//
|
||||
// Applications are responsible for ensuring that no more than one goroutine
|
||||
// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage,
|
||||
// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and
|
||||
// that no more than one goroutine calls the read methods (NextReader,
|
||||
// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler)
|
||||
// concurrently.
|
||||
//
|
||||
// The Close and WriteControl methods can be called concurrently with all other
|
||||
// methods.
|
||||
//
|
||||
// Origin Considerations
|
||||
//
|
||||
// Web browsers allow Javascript applications to open a WebSocket connection to
|
||||
// any host. It's up to the server to enforce an origin policy using the Origin
|
||||
// request header sent by the browser.
|
||||
//
|
||||
// The Upgrader calls the function specified in the CheckOrigin field to check
|
||||
// the origin. If the CheckOrigin function returns false, then the Upgrade
|
||||
// method fails the WebSocket handshake with HTTP status 403.
|
||||
//
|
||||
// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail
|
||||
// the handshake if the Origin request header is present and the Origin host is
|
||||
// not equal to the Host request header.
|
||||
//
|
||||
// The deprecated package-level Upgrade function does not perform origin
|
||||
// checking. The application is responsible for checking the Origin header
|
||||
// before calling the Upgrade function.
|
||||
//
|
||||
// Buffers
|
||||
//
|
||||
// Connections buffer network input and output to reduce the number
|
||||
// of system calls when reading or writing messages.
|
||||
//
|
||||
// Write buffers are also used for constructing WebSocket frames. See RFC 6455,
|
||||
// Section 5 for a discussion of message framing. A WebSocket frame header is
|
||||
// written to the network each time a write buffer is flushed to the network.
|
||||
// Decreasing the size of the write buffer can increase the amount of framing
|
||||
// overhead on the connection.
|
||||
//
|
||||
// The buffer sizes in bytes are specified by the ReadBufferSize and
|
||||
// WriteBufferSize fields in the Dialer and Upgrader. The Dialer uses a default
|
||||
// size of 4096 when a buffer size field is set to zero. The Upgrader reuses
|
||||
// buffers created by the HTTP server when a buffer size field is set to zero.
|
||||
// The HTTP server buffers have a size of 4096 at the time of this writing.
|
||||
//
|
||||
// The buffer sizes do not limit the size of a message that can be read or
|
||||
// written by a connection.
|
||||
//
|
||||
// Buffers are held for the lifetime of the connection by default. If the
|
||||
// Dialer or Upgrader WriteBufferPool field is set, then a connection holds the
|
||||
// write buffer only when writing a message.
|
||||
//
|
||||
// Applications should tune the buffer sizes to balance memory use and
|
||||
// performance. Increasing the buffer size uses more memory, but can reduce the
|
||||
// number of system calls to read or write the network. In the case of writing,
|
||||
// increasing the buffer size can reduce the number of frame headers written to
|
||||
// the network.
|
||||
//
|
||||
// Some guidelines for setting buffer parameters are:
|
||||
//
|
||||
// Limit the buffer sizes to the maximum expected message size. Buffers larger
|
||||
// than the largest message do not provide any benefit.
|
||||
//
|
||||
// Depending on the distribution of message sizes, setting the buffer size to
|
||||
// a value less than the maximum expected message size can greatly reduce memory
|
||||
// use with a small impact on performance. Here's an example: If 99% of the
|
||||
// messages are smaller than 256 bytes and the maximum message size is 512
|
||||
// bytes, then a buffer size of 256 bytes will result in 1.01 more system calls
|
||||
// than a buffer size of 512 bytes. The memory savings is 50%.
|
||||
//
|
||||
// A write buffer pool is useful when the application has a modest number
|
||||
// writes over a large number of connections. when buffers are pooled, a larger
|
||||
// buffer size has a reduced impact on total memory use and has the benefit of
|
||||
// reducing system calls and frame overhead.
|
||||
//
|
||||
// Compression EXPERIMENTAL
|
||||
//
|
||||
// Per message compression extensions (RFC 7692) are experimentally supported
|
||||
// by this package in a limited capacity. Setting the EnableCompression option
|
||||
// to true in Dialer or Upgrader will attempt to negotiate per message deflate
|
||||
// support.
|
||||
//
|
||||
// var upgrader = websocket.Upgrader{
|
||||
// EnableCompression: true,
|
||||
// }
|
||||
//
|
||||
// If compression was successfully negotiated with the connection's peer, any
|
||||
// message received in compressed form will be automatically decompressed.
|
||||
// All Read methods will return uncompressed bytes.
|
||||
//
|
||||
// Per message compression of messages written to a connection can be enabled
|
||||
// or disabled by calling the corresponding Conn method:
|
||||
//
|
||||
// conn.EnableWriteCompression(false)
|
||||
//
|
||||
// Currently this package does not support compression with "context takeover".
|
||||
// This means that messages must be compressed and decompressed in isolation,
|
||||
// without retaining sliding window or dictionary state across messages. For
|
||||
// more details refer to RFC 7692.
|
||||
//
|
||||
// Use of compression is experimental and may result in decreased performance.
|
||||
package websocket
|
3
vendor/github.com/gorilla/websocket/go.mod
generated
vendored
Normal file
3
vendor/github.com/gorilla/websocket/go.mod
generated
vendored
Normal file
@ -0,0 +1,3 @@
module github.com/gorilla/websocket

go 1.12
0
vendor/github.com/gorilla/websocket/go.sum
generated
vendored
Normal file
0
vendor/github.com/gorilla/websocket/go.sum
generated
vendored
Normal file
42
vendor/github.com/gorilla/websocket/join.go
generated
vendored
Normal file
42
vendor/github.com/gorilla/websocket/join.go
generated
vendored
Normal file
@ -0,0 +1,42 @@
// Copyright 2019 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package websocket

import (
	"io"
	"strings"
)

// JoinMessages concatenates received messages to create a single io.Reader.
// The string term is appended to each message. The returned reader does not
// support concurrent calls to the Read method.
func JoinMessages(c *Conn, term string) io.Reader {
	return &joinReader{c: c, term: term}
}

type joinReader struct {
	c    *Conn
	term string
	r    io.Reader
}

func (r *joinReader) Read(p []byte) (int, error) {
	if r.r == nil {
		var err error
		_, r.r, err = r.c.NextReader()
		if err != nil {
			return 0, err
		}
		if r.term != "" {
			r.r = io.MultiReader(r.r, strings.NewReader(r.term))
		}
	}
	n, err := r.r.Read(p)
	if err == io.EOF {
		err = nil
		r.r = nil
	}
	return n, err
}
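A non-vendored sketch of JoinMessages: streaming newline-terminated messages from a connection into a line scanner (conn is assumed to be an established *websocket.Conn):

package main

import (
	"bufio"
	"log"

	"github.com/gorilla/websocket"
)

func readLines(conn *websocket.Conn) {
	// Each received message is terminated with "\n" so a Scanner can split it.
	r := websocket.JoinMessages(conn, "\n")
	sc := bufio.NewScanner(r)
	for sc.Scan() {
		log.Println("message:", sc.Text())
	}
	if err := sc.Err(); err != nil {
		log.Println("read loop ended:", err)
	}
}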
60
vendor/github.com/gorilla/websocket/json.go
generated
vendored
Normal file
60
vendor/github.com/gorilla/websocket/json.go
generated
vendored
Normal file
@ -0,0 +1,60 @@
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package websocket

import (
	"encoding/json"
	"io"
)

// WriteJSON writes the JSON encoding of v as a message.
//
// Deprecated: Use c.WriteJSON instead.
func WriteJSON(c *Conn, v interface{}) error {
	return c.WriteJSON(v)
}

// WriteJSON writes the JSON encoding of v as a message.
//
// See the documentation for encoding/json Marshal for details about the
// conversion of Go values to JSON.
func (c *Conn) WriteJSON(v interface{}) error {
	w, err := c.NextWriter(TextMessage)
	if err != nil {
		return err
	}
	err1 := json.NewEncoder(w).Encode(v)
	err2 := w.Close()
	if err1 != nil {
		return err1
	}
	return err2
}

// ReadJSON reads the next JSON-encoded message from the connection and stores
// it in the value pointed to by v.
//
// Deprecated: Use c.ReadJSON instead.
func ReadJSON(c *Conn, v interface{}) error {
	return c.ReadJSON(v)
}

// ReadJSON reads the next JSON-encoded message from the connection and stores
// it in the value pointed to by v.
//
// See the documentation for the encoding/json Unmarshal function for details
// about the conversion of JSON to a Go value.
func (c *Conn) ReadJSON(v interface{}) error {
	_, r, err := c.NextReader()
	if err != nil {
		return err
	}
	err = json.NewDecoder(r).Decode(v)
	if err == io.EOF {
		// One value is expected in the message.
		err = io.ErrUnexpectedEOF
	}
	return err
}
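A brief, non-vendored sketch of the JSON helpers above used for a request/response round trip; the Event type is a hypothetical application struct:

package main

import (
	"log"

	"github.com/gorilla/websocket"
)

// Event is a hypothetical application message.
type Event struct {
	Kind    string `json:"kind"`
	Payload string `json:"payload"`
}

func roundTrip(conn *websocket.Conn) error {
	if err := conn.WriteJSON(Event{Kind: "ping", Payload: "hello"}); err != nil {
		return err
	}
	var reply Event
	if err := conn.ReadJSON(&reply); err != nil {
		return err
	}
	log.Printf("got %s: %s", reply.Kind, reply.Payload)
	return nil
}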
54
vendor/github.com/gorilla/websocket/mask.go
generated
vendored
Normal file
54
vendor/github.com/gorilla/websocket/mask.go
generated
vendored
Normal file
@ -0,0 +1,54 @@
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of
// this source code is governed by a BSD-style license that can be found in the
// LICENSE file.

// +build !appengine

package websocket

import "unsafe"

const wordSize = int(unsafe.Sizeof(uintptr(0)))

func maskBytes(key [4]byte, pos int, b []byte) int {
	// Mask one byte at a time for small buffers.
	if len(b) < 2*wordSize {
		for i := range b {
			b[i] ^= key[pos&3]
			pos++
		}
		return pos & 3
	}

	// Mask one byte at a time to word boundary.
	if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 {
		n = wordSize - n
		for i := range b[:n] {
			b[i] ^= key[pos&3]
			pos++
		}
		b = b[n:]
	}

	// Create aligned word size key.
	var k [wordSize]byte
	for i := range k {
		k[i] = key[(pos+i)&3]
	}
	kw := *(*uintptr)(unsafe.Pointer(&k))

	// Mask one word at a time.
	n := (len(b) / wordSize) * wordSize
	for i := 0; i < n; i += wordSize {
		*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw
	}

	// Mask one byte at a time for remaining bytes.
	b = b[n:]
	for i := range b {
		b[i] ^= key[pos&3]
		pos++
	}

	return pos & 3
}
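As a non-vendored illustration of the masking rule these functions implement (RFC 6455, Section 5.3): XOR with the 4-byte key is its own inverse, so applying the same key twice restores the payload. This standalone sketch uses a simple byte-at-a-time loop rather than the package's unexported maskBytes:

package main

import (
	"bytes"
	"fmt"
)

// xorMask applies the RFC 6455 client masking: b[i] ^= key[(pos+i) % 4].
func xorMask(key [4]byte, pos int, b []byte) int {
	for i := range b {
		b[i] ^= key[pos&3]
		pos++
	}
	return pos & 3
}

func main() {
	key := [4]byte{0x37, 0xfa, 0x21, 0x3d}
	payload := []byte("Hello")
	masked := append([]byte(nil), payload...)

	xorMask(key, 0, masked) // mask
	xorMask(key, 0, masked) // unmask

	fmt.Println(bytes.Equal(payload, masked)) // true
}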
15
vendor/github.com/gorilla/websocket/mask_safe.go
generated
vendored
Normal file
15
vendor/github.com/gorilla/websocket/mask_safe.go
generated
vendored
Normal file
@ -0,0 +1,15 @@
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of
// this source code is governed by a BSD-style license that can be found in the
// LICENSE file.

// +build appengine

package websocket

func maskBytes(key [4]byte, pos int, b []byte) int {
	for i := range b {
		b[i] ^= key[pos&3]
		pos++
	}
	return pos & 3
}
102
vendor/github.com/gorilla/websocket/prepared.go
generated
vendored
Normal file
102
vendor/github.com/gorilla/websocket/prepared.go
generated
vendored
Normal file
@ -0,0 +1,102 @@
// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package websocket

import (
	"bytes"
	"net"
	"sync"
	"time"
)

// PreparedMessage caches on the wire representations of a message payload.
// Use PreparedMessage to efficiently send a message payload to multiple
// connections. PreparedMessage is especially useful when compression is used
// because the CPU and memory expensive compression operation can be executed
// once for a given set of compression options.
type PreparedMessage struct {
	messageType int
	data        []byte
	mu          sync.Mutex
	frames      map[prepareKey]*preparedFrame
}

// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage.
type prepareKey struct {
	isServer         bool
	compress         bool
	compressionLevel int
}

// preparedFrame contains data in wire representation.
type preparedFrame struct {
	once sync.Once
	data []byte
}

// NewPreparedMessage returns an initialized PreparedMessage. You can then send
// it to connection using WritePreparedMessage method. Valid wire
// representation will be calculated lazily only once for a set of current
// connection options.
func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) {
	pm := &PreparedMessage{
		messageType: messageType,
		frames:      make(map[prepareKey]*preparedFrame),
		data:        data,
	}

	// Prepare a plain server frame.
	_, frameData, err := pm.frame(prepareKey{isServer: true, compress: false})
	if err != nil {
		return nil, err
	}

	// To protect against caller modifying the data argument, remember the data
	// copied to the plain server frame.
	pm.data = frameData[len(frameData)-len(data):]
	return pm, nil
}

func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) {
	pm.mu.Lock()
	frame, ok := pm.frames[key]
	if !ok {
		frame = &preparedFrame{}
		pm.frames[key] = frame
	}
	pm.mu.Unlock()

	var err error
	frame.once.Do(func() {
		// Prepare a frame using a 'fake' connection.
		// TODO: Refactor code in conn.go to allow more direct construction of
		// the frame.
		mu := make(chan struct{}, 1)
		mu <- struct{}{}
		var nc prepareConn
		c := &Conn{
			conn:                   &nc,
			mu:                     mu,
			isServer:               key.isServer,
			compressionLevel:       key.compressionLevel,
			enableWriteCompression: true,
			writeBuf:               make([]byte, defaultWriteBufferSize+maxFrameHeaderSize),
		}
		if key.compress {
			c.newCompressionWriter = compressNoContextTakeover
		}
		err = c.WriteMessage(pm.messageType, pm.data)
		frame.data = nc.buf.Bytes()
	})
	return pm.messageType, frame.data, err
}

type prepareConn struct {
	buf bytes.Buffer
	net.Conn
}

func (pc *prepareConn) Write(p []byte) (int, error)        { return pc.buf.Write(p) }
func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil }
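A non-vendored sketch of broadcasting one payload to many connections with PreparedMessage, so framing (and compression, if negotiated) is done once; conns is a hypothetical slice of established connections:

package main

import (
	"log"

	"github.com/gorilla/websocket"
)

func broadcast(conns []*websocket.Conn, payload []byte) {
	pm, err := websocket.NewPreparedMessage(websocket.TextMessage, payload)
	if err != nil {
		log.Println("prepare:", err)
		return
	}
	for _, c := range conns {
		// Each connection reuses the cached wire representation.
		if err := c.WritePreparedMessage(pm); err != nil {
			log.Println("write:", err)
		}
	}
}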
77
vendor/github.com/gorilla/websocket/proxy.go
generated
vendored
Normal file
77
vendor/github.com/gorilla/websocket/proxy.go
generated
vendored
Normal file
@ -0,0 +1,77 @@
// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package websocket

import (
	"bufio"
	"encoding/base64"
	"errors"
	"net"
	"net/http"
	"net/url"
	"strings"
)

type netDialerFunc func(network, addr string) (net.Conn, error)

func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) {
	return fn(network, addr)
}

func init() {
	proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) {
		return &httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDialer.Dial}, nil
	})
}

type httpProxyDialer struct {
	proxyURL    *url.URL
	forwardDial func(network, addr string) (net.Conn, error)
}

func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) {
	hostPort, _ := hostPortNoPort(hpd.proxyURL)
	conn, err := hpd.forwardDial(network, hostPort)
	if err != nil {
		return nil, err
	}

	connectHeader := make(http.Header)
	if user := hpd.proxyURL.User; user != nil {
		proxyUser := user.Username()
		if proxyPassword, passwordSet := user.Password(); passwordSet {
			credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword))
			connectHeader.Set("Proxy-Authorization", "Basic "+credential)
		}
	}

	connectReq := &http.Request{
		Method: "CONNECT",
		URL:    &url.URL{Opaque: addr},
		Host:   addr,
		Header: connectHeader,
	}

	if err := connectReq.Write(conn); err != nil {
		conn.Close()
		return nil, err
	}

	// Read response. It's OK to use and discard buffered reader here because
	// the remote server does not speak until spoken to.
	br := bufio.NewReader(conn)
	resp, err := http.ReadResponse(br, connectReq)
	if err != nil {
		conn.Close()
		return nil, err
	}

	if resp.StatusCode != 200 {
		conn.Close()
		f := strings.SplitN(resp.Status, " ", 2)
		return nil, errors.New(f[1])
	}
	return conn, nil
}
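A non-vendored sketch of routing the dial through an HTTP CONNECT proxy, which is the path proxy.go implements; the proxy URL is a placeholder:

package main

import (
	"log"
	"net/http"
	"net/url"

	"github.com/gorilla/websocket"
)

func dialViaProxy(wsURL string) (*websocket.Conn, error) {
	proxyURL, err := url.Parse("http://proxy.example.com:3128") // placeholder proxy
	if err != nil {
		return nil, err
	}
	d := websocket.Dialer{
		Proxy: http.ProxyURL(proxyURL),
		// Or: Proxy: http.ProxyFromEnvironment to honor HTTP(S)_PROXY variables.
	}
	conn, resp, err := d.Dial(wsURL, nil)
	if err != nil {
		if resp != nil {
			log.Println("handshake failed with status", resp.StatusCode)
		}
		return nil, err
	}
	return conn, nil
}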
363
vendor/github.com/gorilla/websocket/server.go
generated
vendored
Normal file
363
vendor/github.com/gorilla/websocket/server.go
generated
vendored
Normal file
@ -0,0 +1,363 @@
|
||||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// HandshakeError describes an error with the handshake from the peer.
|
||||
type HandshakeError struct {
|
||||
message string
|
||||
}
|
||||
|
||||
func (e HandshakeError) Error() string { return e.message }
|
||||
|
||||
// Upgrader specifies parameters for upgrading an HTTP connection to a
|
||||
// WebSocket connection.
|
||||
type Upgrader struct {
|
||||
// HandshakeTimeout specifies the duration for the handshake to complete.
|
||||
HandshakeTimeout time.Duration
|
||||
|
||||
// ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer
|
||||
// size is zero, then buffers allocated by the HTTP server are used. The
|
||||
// I/O buffer sizes do not limit the size of the messages that can be sent
|
||||
// or received.
|
||||
ReadBufferSize, WriteBufferSize int
|
||||
|
||||
// WriteBufferPool is a pool of buffers for write operations. If the value
|
||||
// is not set, then write buffers are allocated to the connection for the
|
||||
// lifetime of the connection.
|
||||
//
|
||||
// A pool is most useful when the application has a modest volume of writes
|
||||
// across a large number of connections.
|
||||
//
|
||||
// Applications should use a single pool for each unique value of
|
||||
// WriteBufferSize.
|
||||
WriteBufferPool BufferPool
|
||||
|
||||
// Subprotocols specifies the server's supported protocols in order of
|
||||
// preference. If this field is not nil, then the Upgrade method negotiates a
|
||||
// subprotocol by selecting the first match in this list with a protocol
|
||||
// requested by the client. If there's no match, then no protocol is
|
||||
// negotiated (the Sec-Websocket-Protocol header is not included in the
|
||||
// handshake response).
|
||||
Subprotocols []string
|
||||
|
||||
// Error specifies the function for generating HTTP error responses. If Error
|
||||
// is nil, then http.Error is used to generate the HTTP response.
|
||||
Error func(w http.ResponseWriter, r *http.Request, status int, reason error)
|
||||
|
||||
// CheckOrigin returns true if the request Origin header is acceptable. If
|
||||
// CheckOrigin is nil, then a safe default is used: return false if the
|
||||
// Origin request header is present and the origin host is not equal to
|
||||
// request Host header.
|
||||
//
|
||||
// A CheckOrigin function should carefully validate the request origin to
|
||||
// prevent cross-site request forgery.
|
||||
CheckOrigin func(r *http.Request) bool
|
||||
|
||||
// EnableCompression specify if the server should attempt to negotiate per
|
||||
// message compression (RFC 7692). Setting this value to true does not
|
||||
// guarantee that compression will be supported. Currently only "no context
|
||||
// takeover" modes are supported.
|
||||
EnableCompression bool
|
||||
}
|
||||
|
||||
func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) {
|
||||
err := HandshakeError{reason}
|
||||
if u.Error != nil {
|
||||
u.Error(w, r, status, err)
|
||||
} else {
|
||||
w.Header().Set("Sec-Websocket-Version", "13")
|
||||
http.Error(w, http.StatusText(status), status)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// checkSameOrigin returns true if the origin is not set or is equal to the request host.
|
||||
func checkSameOrigin(r *http.Request) bool {
|
||||
origin := r.Header["Origin"]
|
||||
if len(origin) == 0 {
|
||||
return true
|
||||
}
|
||||
u, err := url.Parse(origin[0])
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return equalASCIIFold(u.Host, r.Host)
|
||||
}
|
||||
|
||||
func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string {
|
||||
if u.Subprotocols != nil {
|
||||
clientProtocols := Subprotocols(r)
|
||||
for _, serverProtocol := range u.Subprotocols {
|
||||
for _, clientProtocol := range clientProtocols {
|
||||
if clientProtocol == serverProtocol {
|
||||
return clientProtocol
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if responseHeader != nil {
|
||||
return responseHeader.Get("Sec-Websocket-Protocol")
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
|
||||
//
|
||||
// The responseHeader is included in the response to the client's upgrade
|
||||
// request. Use the responseHeader to specify cookies (Set-Cookie) and the
|
||||
// application negotiated subprotocol (Sec-WebSocket-Protocol).
|
||||
//
|
||||
// If the upgrade fails, then Upgrade replies to the client with an HTTP error
|
||||
// response.
|
||||
func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) {
|
||||
const badHandshake = "websocket: the client is not using the websocket protocol: "
|
||||
|
||||
if !tokenListContainsValue(r.Header, "Connection", "upgrade") {
|
||||
return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'upgrade' token not found in 'Connection' header")
|
||||
}
|
||||
|
||||
if !tokenListContainsValue(r.Header, "Upgrade", "websocket") {
|
||||
return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header")
|
||||
}
|
||||
|
||||
if r.Method != "GET" {
|
||||
return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET")
|
||||
}
|
||||
|
||||
if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") {
|
||||
return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header")
|
||||
}
|
||||
|
||||
if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok {
|
||||
return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-WebSocket-Extensions' headers are unsupported")
|
||||
}
|
||||
|
||||
checkOrigin := u.CheckOrigin
|
||||
if checkOrigin == nil {
|
||||
checkOrigin = checkSameOrigin
|
||||
}
|
||||
if !checkOrigin(r) {
|
||||
return u.returnError(w, r, http.StatusForbidden, "websocket: request origin not allowed by Upgrader.CheckOrigin")
|
||||
}
|
||||
|
||||
challengeKey := r.Header.Get("Sec-Websocket-Key")
|
||||
if challengeKey == "" {
|
||||
return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'Sec-WebSocket-Key' header is missing or blank")
|
||||
}
|
||||
|
||||
subprotocol := u.selectSubprotocol(r, responseHeader)
|
||||
|
||||
// Negotiate PMCE
|
||||
var compress bool
|
||||
if u.EnableCompression {
|
||||
for _, ext := range parseExtensions(r.Header) {
|
||||
if ext[""] != "permessage-deflate" {
|
||||
continue
|
||||
}
|
||||
compress = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
h, ok := w.(http.Hijacker)
|
||||
if !ok {
|
||||
return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker")
|
||||
}
|
||||
var brw *bufio.ReadWriter
|
||||
netConn, brw, err := h.Hijack()
|
||||
if err != nil {
|
||||
return u.returnError(w, r, http.StatusInternalServerError, err.Error())
|
||||
}
|
||||
|
||||
if brw.Reader.Buffered() > 0 {
|
||||
netConn.Close()
|
||||
return nil, errors.New("websocket: client sent data before handshake is complete")
|
||||
}
|
||||
|
||||
var br *bufio.Reader
|
||||
if u.ReadBufferSize == 0 && bufioReaderSize(netConn, brw.Reader) > 256 {
|
||||
// Reuse hijacked buffered reader as connection reader.
|
||||
br = brw.Reader
|
||||
}
|
||||
|
||||
buf := bufioWriterBuffer(netConn, brw.Writer)
|
||||
|
||||
var writeBuf []byte
|
||||
if u.WriteBufferPool == nil && u.WriteBufferSize == 0 && len(buf) >= maxFrameHeaderSize+256 {
|
||||
// Reuse hijacked write buffer as connection buffer.
|
||||
writeBuf = buf
|
||||
}
|
||||
|
||||
c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize, u.WriteBufferPool, br, writeBuf)
|
||||
c.subprotocol = subprotocol
|
||||
|
||||
if compress {
|
||||
c.newCompressionWriter = compressNoContextTakeover
|
||||
c.newDecompressionReader = decompressNoContextTakeover
|
||||
}
|
||||
|
||||
// Use larger of hijacked buffer and connection write buffer for header.
|
||||
p := buf
|
||||
if len(c.writeBuf) > len(p) {
|
||||
p = c.writeBuf
|
||||
}
|
||||
p = p[:0]
|
||||
|
||||
p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...)
|
||||
p = append(p, computeAcceptKey(challengeKey)...)
|
||||
p = append(p, "\r\n"...)
|
||||
if c.subprotocol != "" {
|
||||
p = append(p, "Sec-WebSocket-Protocol: "...)
|
||||
p = append(p, c.subprotocol...)
|
||||
p = append(p, "\r\n"...)
|
||||
}
|
||||
if compress {
|
||||
p = append(p, "Sec-WebSocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...)
|
||||
}
|
||||
for k, vs := range responseHeader {
|
||||
if k == "Sec-Websocket-Protocol" {
|
||||
continue
|
||||
}
|
||||
for _, v := range vs {
|
||||
p = append(p, k...)
|
||||
p = append(p, ": "...)
|
||||
for i := 0; i < len(v); i++ {
|
||||
b := v[i]
|
||||
if b <= 31 {
|
||||
// prevent response splitting.
|
||||
b = ' '
|
||||
}
|
||||
p = append(p, b)
|
||||
}
|
||||
p = append(p, "\r\n"...)
|
||||
}
|
||||
}
|
||||
p = append(p, "\r\n"...)
|
||||
|
||||
// Clear deadlines set by HTTP server.
|
||||
netConn.SetDeadline(time.Time{})
|
||||
|
||||
if u.HandshakeTimeout > 0 {
|
||||
netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout))
|
||||
}
|
||||
if _, err = netConn.Write(p); err != nil {
|
||||
netConn.Close()
|
||||
return nil, err
|
||||
}
|
||||
if u.HandshakeTimeout > 0 {
|
||||
netConn.SetWriteDeadline(time.Time{})
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
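// A minimal usage sketch (handler and route names are assumptions for the
// example, not part of this package): Upgrade is normally called from an
// http.Handler in a caller that imports github.com/gorilla/websocket, and the
// returned *Conn is then driven with ReadMessage/WriteMessage.
//
//	var upgrader = websocket.Upgrader{
//		ReadBufferSize:  1024,
//		WriteBufferSize: 1024,
//	}
//
//	func echo(w http.ResponseWriter, r *http.Request) {
//		conn, err := upgrader.Upgrade(w, r, nil)
//		if err != nil {
//			log.Println("upgrade:", err)
//			return
//		}
//		defer conn.Close()
//		for {
//			mt, msg, err := conn.ReadMessage()
//			if err != nil {
//				return
//			}
//			if err := conn.WriteMessage(mt, msg); err != nil {
//				return
//			}
//		}
//	}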
|
||||
|
||||
// Upgrade upgrades the HTTP server connection to the WebSocket protocol.
|
||||
//
|
||||
// Deprecated: Use websocket.Upgrader instead.
|
||||
//
|
||||
// Upgrade does not perform origin checking. The application is responsible for
|
||||
// checking the Origin header before calling Upgrade. An example implementation
|
||||
// of the same origin policy check is:
|
||||
//
|
||||
// if req.Header.Get("Origin") != "http://"+req.Host {
|
||||
// http.Error(w, "Origin not allowed", http.StatusForbidden)
|
||||
// return
|
||||
// }
|
||||
//
|
||||
// If the endpoint supports subprotocols, then the application is responsible
|
||||
// for negotiating the protocol used on the connection. Use the Subprotocols()
|
||||
// function to get the subprotocols requested by the client. Use the
|
||||
// Sec-Websocket-Protocol response header to specify the subprotocol selected
|
||||
// by the application.
|
||||
//
|
||||
// The responseHeader is included in the response to the client's upgrade
|
||||
// request. Use the responseHeader to specify cookies (Set-Cookie) and the
|
||||
// negotiated subprotocol (Sec-Websocket-Protocol).
|
||||
//
|
||||
// The connection buffers IO to the underlying network connection. The
|
||||
// readBufSize and writeBufSize parameters specify the size of the buffers to
|
||||
// use. Messages can be larger than the buffers.
|
||||
//
|
||||
// If the request is not a valid WebSocket handshake, then Upgrade returns an
|
||||
// error of type HandshakeError. Applications should handle this error by
|
||||
// replying to the client with an HTTP error response.
|
||||
func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) {
|
||||
u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize}
|
||||
u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) {
|
||||
// don't return errors to maintain backwards compatibility
|
||||
}
|
||||
u.CheckOrigin = func(r *http.Request) bool {
|
||||
// allow all connections by default
|
||||
return true
|
||||
}
|
||||
return u.Upgrade(w, r, responseHeader)
|
||||
}
|
||||
|
||||
// Subprotocols returns the subprotocols requested by the client in the
|
||||
// Sec-Websocket-Protocol header.
|
||||
func Subprotocols(r *http.Request) []string {
|
||||
h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol"))
|
||||
if h == "" {
|
||||
return nil
|
||||
}
|
||||
protocols := strings.Split(h, ",")
|
||||
for i := range protocols {
|
||||
protocols[i] = strings.TrimSpace(protocols[i])
|
||||
}
|
||||
return protocols
|
||||
}
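// A minimal sketch of what Subprotocols returns (the request values are
// assumptions for the example): a header of "chat, superchat" is split on
// commas and trimmed into two entries.
//
//	r, _ := http.NewRequest("GET", "http://example.com/ws", nil)
//	r.Header.Set("Sec-Websocket-Protocol", "chat, superchat")
//	protocols := Subprotocols(r) // []string{"chat", "superchat"}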
|
||||
|
||||
// IsWebSocketUpgrade returns true if the client requested upgrade to the
|
||||
// WebSocket protocol.
|
||||
func IsWebSocketUpgrade(r *http.Request) bool {
|
||||
return tokenListContainsValue(r.Header, "Connection", "upgrade") &&
|
||||
tokenListContainsValue(r.Header, "Upgrade", "websocket")
|
||||
}
|
||||
|
||||
// bufioReaderSize returns the size of a bufio.Reader.
|
||||
func bufioReaderSize(originalReader io.Reader, br *bufio.Reader) int {
|
||||
// This code assumes that peek on a reset reader returns
|
||||
// bufio.Reader.buf[:0].
|
||||
// TODO: Use bufio.Reader.Size() after Go 1.10
|
||||
br.Reset(originalReader)
|
||||
if p, err := br.Peek(0); err == nil {
|
||||
return cap(p)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// writeHook is an io.Writer that records the last slice passed to it via
|
||||
// io.Writer.Write.
|
||||
type writeHook struct {
|
||||
p []byte
|
||||
}
|
||||
|
||||
func (wh *writeHook) Write(p []byte) (int, error) {
|
||||
wh.p = p
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
// bufioWriterBuffer grabs the buffer from a bufio.Writer.
|
||||
func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte {
|
||||
// This code assumes that bufio.Writer.buf[:1] is passed to the
|
||||
// bufio.Writer's underlying writer.
|
||||
var wh writeHook
|
||||
bw.Reset(&wh)
|
||||
bw.WriteByte(0)
|
||||
bw.Flush()
|
||||
|
||||
bw.Reset(originalWriter)
|
||||
|
||||
return wh.p[:cap(wh.p)]
|
||||
}
|
19
vendor/github.com/gorilla/websocket/trace.go
generated
vendored
Normal file
@ -0,0 +1,19 @@
|
||||
// +build go1.8
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"net/http/httptrace"
|
||||
)
|
||||
|
||||
func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error {
|
||||
if trace.TLSHandshakeStart != nil {
|
||||
trace.TLSHandshakeStart()
|
||||
}
|
||||
err := doHandshake(tlsConn, cfg)
|
||||
if trace.TLSHandshakeDone != nil {
|
||||
trace.TLSHandshakeDone(tlsConn.ConnectionState(), err)
|
||||
}
|
||||
return err
|
||||
}
|
12
vendor/github.com/gorilla/websocket/trace_17.go
generated
vendored
Normal file
@ -0,0 +1,12 @@
|
||||
// +build !go1.8
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"net/http/httptrace"
|
||||
)
|
||||
|
||||
func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error {
|
||||
return doHandshake(tlsConn, cfg)
|
||||
}
|
283
vendor/github.com/gorilla/websocket/util.go
generated
vendored
Normal file
@ -0,0 +1,283 @@
|
||||
// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/sha1"
|
||||
"encoding/base64"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
|
||||
|
||||
func computeAcceptKey(challengeKey string) string {
|
||||
h := sha1.New()
|
||||
h.Write([]byte(challengeKey))
|
||||
h.Write(keyGUID)
|
||||
return base64.StdEncoding.EncodeToString(h.Sum(nil))
|
||||
}
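// For reference, RFC 6455 section 1.3 gives a worked example of this
// derivation: the sample key "dGhlIHNhbXBsZSBub25jZQ==" must hash to the
// accept value "s3pPLMBiTxaQ9kYGzzhZRbK+xOo=". A minimal in-package check
// (the panic is only for illustration):
//
//	if computeAcceptKey("dGhlIHNhbXBsZSBub25jZQ==") != "s3pPLMBiTxaQ9kYGzzhZRbK+xOo=" {
//		panic("unexpected Sec-WebSocket-Accept value")
//	}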
|
||||
|
||||
func generateChallengeKey() (string, error) {
|
||||
p := make([]byte, 16)
|
||||
if _, err := io.ReadFull(rand.Reader, p); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return base64.StdEncoding.EncodeToString(p), nil
|
||||
}
|
||||
|
||||
// Token octets per RFC 2616.
|
||||
var isTokenOctet = [256]bool{
|
||||
'!': true,
|
||||
'#': true,
|
||||
'$': true,
|
||||
'%': true,
|
||||
'&': true,
|
||||
'\'': true,
|
||||
'*': true,
|
||||
'+': true,
|
||||
'-': true,
|
||||
'.': true,
|
||||
'0': true,
|
||||
'1': true,
|
||||
'2': true,
|
||||
'3': true,
|
||||
'4': true,
|
||||
'5': true,
|
||||
'6': true,
|
||||
'7': true,
|
||||
'8': true,
|
||||
'9': true,
|
||||
'A': true,
|
||||
'B': true,
|
||||
'C': true,
|
||||
'D': true,
|
||||
'E': true,
|
||||
'F': true,
|
||||
'G': true,
|
||||
'H': true,
|
||||
'I': true,
|
||||
'J': true,
|
||||
'K': true,
|
||||
'L': true,
|
||||
'M': true,
|
||||
'N': true,
|
||||
'O': true,
|
||||
'P': true,
|
||||
'Q': true,
|
||||
'R': true,
|
||||
'S': true,
|
||||
'T': true,
|
||||
'U': true,
|
||||
'W': true,
|
||||
'V': true,
|
||||
'X': true,
|
||||
'Y': true,
|
||||
'Z': true,
|
||||
'^': true,
|
||||
'_': true,
|
||||
'`': true,
|
||||
'a': true,
|
||||
'b': true,
|
||||
'c': true,
|
||||
'd': true,
|
||||
'e': true,
|
||||
'f': true,
|
||||
'g': true,
|
||||
'h': true,
|
||||
'i': true,
|
||||
'j': true,
|
||||
'k': true,
|
||||
'l': true,
|
||||
'm': true,
|
||||
'n': true,
|
||||
'o': true,
|
||||
'p': true,
|
||||
'q': true,
|
||||
'r': true,
|
||||
's': true,
|
||||
't': true,
|
||||
'u': true,
|
||||
'v': true,
|
||||
'w': true,
|
||||
'x': true,
|
||||
'y': true,
|
||||
'z': true,
|
||||
'|': true,
|
||||
'~': true,
|
||||
}
|
||||
|
||||
// skipSpace returns a slice of the string s with all leading RFC 2616 linear
|
||||
// whitespace removed.
|
||||
func skipSpace(s string) (rest string) {
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
if b := s[i]; b != ' ' && b != '\t' {
|
||||
break
|
||||
}
|
||||
}
|
||||
return s[i:]
|
||||
}
|
||||
|
||||
// nextToken returns the leading RFC 2616 token of s and the string following
|
||||
// the token.
|
||||
func nextToken(s string) (token, rest string) {
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
if !isTokenOctet[s[i]] {
|
||||
break
|
||||
}
|
||||
}
|
||||
return s[:i], s[i:]
|
||||
}
|
||||
|
||||
// nextTokenOrQuoted returns the leading token or quoted string per RFC 2616
|
||||
// and the string following the token or quoted string.
|
||||
func nextTokenOrQuoted(s string) (value string, rest string) {
|
||||
if !strings.HasPrefix(s, "\"") {
|
||||
return nextToken(s)
|
||||
}
|
||||
s = s[1:]
|
||||
for i := 0; i < len(s); i++ {
|
||||
switch s[i] {
|
||||
case '"':
|
||||
return s[:i], s[i+1:]
|
||||
case '\\':
|
||||
p := make([]byte, len(s)-1)
|
||||
j := copy(p, s[:i])
|
||||
escape := true
|
||||
for i = i + 1; i < len(s); i++ {
|
||||
b := s[i]
|
||||
switch {
|
||||
case escape:
|
||||
escape = false
|
||||
p[j] = b
|
||||
j++
|
||||
case b == '\\':
|
||||
escape = true
|
||||
case b == '"':
|
||||
return string(p[:j]), s[i+1:]
|
||||
default:
|
||||
p[j] = b
|
||||
j++
|
||||
}
|
||||
}
|
||||
return "", ""
|
||||
}
|
||||
}
|
||||
return "", ""
|
||||
}
|
||||
|
||||
// equalASCIIFold returns true if s is equal to t with ASCII case folding as
|
||||
// defined in RFC 4790.
|
||||
func equalASCIIFold(s, t string) bool {
|
||||
for s != "" && t != "" {
|
||||
sr, size := utf8.DecodeRuneInString(s)
|
||||
s = s[size:]
|
||||
tr, size := utf8.DecodeRuneInString(t)
|
||||
t = t[size:]
|
||||
if sr == tr {
|
||||
continue
|
||||
}
|
||||
if 'A' <= sr && sr <= 'Z' {
|
||||
sr = sr + 'a' - 'A'
|
||||
}
|
||||
if 'A' <= tr && tr <= 'Z' {
|
||||
tr = tr + 'a' - 'A'
|
||||
}
|
||||
if sr != tr {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return s == t
|
||||
}
|
||||
|
||||
// tokenListContainsValue returns true if the 1#token header with the given
|
||||
// name contains a token equal to value with ASCII case folding.
|
||||
func tokenListContainsValue(header http.Header, name string, value string) bool {
|
||||
headers:
|
||||
for _, s := range header[name] {
|
||||
for {
|
||||
var t string
|
||||
t, s = nextToken(skipSpace(s))
|
||||
if t == "" {
|
||||
continue headers
|
||||
}
|
||||
s = skipSpace(s)
|
||||
if s != "" && s[0] != ',' {
|
||||
continue headers
|
||||
}
|
||||
if equalASCIIFold(t, value) {
|
||||
return true
|
||||
}
|
||||
if s == "" {
|
||||
continue headers
|
||||
}
|
||||
s = s[1:]
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// parseExtensions parses WebSocket extensions from a header.
|
||||
func parseExtensions(header http.Header) []map[string]string {
|
||||
// From RFC 6455:
|
||||
//
|
||||
// Sec-WebSocket-Extensions = extension-list
|
||||
// extension-list = 1#extension
|
||||
// extension = extension-token *( ";" extension-param )
|
||||
// extension-token = registered-token
|
||||
// registered-token = token
|
||||
// extension-param = token [ "=" (token | quoted-string) ]
|
||||
// ;When using the quoted-string syntax variant, the value
|
||||
// ;after quoted-string unescaping MUST conform to the
|
||||
// ;'token' ABNF.
|
||||
|
||||
var result []map[string]string
|
||||
headers:
|
||||
for _, s := range header["Sec-Websocket-Extensions"] {
|
||||
for {
|
||||
var t string
|
||||
t, s = nextToken(skipSpace(s))
|
||||
if t == "" {
|
||||
continue headers
|
||||
}
|
||||
ext := map[string]string{"": t}
|
||||
for {
|
||||
s = skipSpace(s)
|
||||
if !strings.HasPrefix(s, ";") {
|
||||
break
|
||||
}
|
||||
var k string
|
||||
k, s = nextToken(skipSpace(s[1:]))
|
||||
if k == "" {
|
||||
continue headers
|
||||
}
|
||||
s = skipSpace(s)
|
||||
var v string
|
||||
if strings.HasPrefix(s, "=") {
|
||||
v, s = nextTokenOrQuoted(skipSpace(s[1:]))
|
||||
s = skipSpace(s)
|
||||
}
|
||||
if s != "" && s[0] != ',' && s[0] != ';' {
|
||||
continue headers
|
||||
}
|
||||
ext[k] = v
|
||||
}
|
||||
if s != "" && s[0] != ',' {
|
||||
continue headers
|
||||
}
|
||||
result = append(result, ext)
|
||||
if s == "" {
|
||||
continue headers
|
||||
}
|
||||
s = s[1:]
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
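// A minimal sketch of parseExtensions output (the header value is an
// assumption for the example): the extension name is stored under the empty
// key and each parameter under its own key.
//
//	h := http.Header{}
//	h.Add("Sec-Websocket-Extensions", "permessage-deflate; client_max_window_bits")
//	exts := parseExtensions(h)
//	// exts[0][""] == "permessage-deflate"
//	// exts[0]["client_max_window_bits"] == ""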
|
473
vendor/github.com/gorilla/websocket/x_net_proxy.go
generated
vendored
Normal file
@ -0,0 +1,473 @@
|
||||
// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT.
|
||||
//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy
|
||||
|
||||
// Package proxy provides support for a variety of protocols to proxy network
|
||||
// data.
|
||||
//
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"net"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type proxy_direct struct{}
|
||||
|
||||
// Direct is a direct proxy: one that makes network connections directly.
|
||||
var proxy_Direct = proxy_direct{}
|
||||
|
||||
func (proxy_direct) Dial(network, addr string) (net.Conn, error) {
|
||||
return net.Dial(network, addr)
|
||||
}
|
||||
|
||||
// A PerHost directs connections to a default Dialer unless the host name
|
||||
// requested matches one of a number of exceptions.
|
||||
type proxy_PerHost struct {
|
||||
def, bypass proxy_Dialer
|
||||
|
||||
bypassNetworks []*net.IPNet
|
||||
bypassIPs []net.IP
|
||||
bypassZones []string
|
||||
bypassHosts []string
|
||||
}
|
||||
|
||||
// NewPerHost returns a PerHost Dialer that directs connections to either
|
||||
// defaultDialer or bypass, depending on whether the connection matches one of
|
||||
// the configured rules.
|
||||
func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost {
|
||||
return &proxy_PerHost{
|
||||
def: defaultDialer,
|
||||
bypass: bypass,
|
||||
}
|
||||
}
|
||||
|
||||
// Dial connects to the address addr on the given network through either
|
||||
// defaultDialer or bypass.
|
||||
func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) {
|
||||
host, _, err := net.SplitHostPort(addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return p.dialerForRequest(host).Dial(network, addr)
|
||||
}
|
||||
|
||||
func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer {
|
||||
if ip := net.ParseIP(host); ip != nil {
|
||||
for _, net := range p.bypassNetworks {
|
||||
if net.Contains(ip) {
|
||||
return p.bypass
|
||||
}
|
||||
}
|
||||
for _, bypassIP := range p.bypassIPs {
|
||||
if bypassIP.Equal(ip) {
|
||||
return p.bypass
|
||||
}
|
||||
}
|
||||
return p.def
|
||||
}
|
||||
|
||||
for _, zone := range p.bypassZones {
|
||||
if strings.HasSuffix(host, zone) {
|
||||
return p.bypass
|
||||
}
|
||||
if host == zone[1:] {
|
||||
// For a zone ".example.com", we match "example.com"
|
||||
// too.
|
||||
return p.bypass
|
||||
}
|
||||
}
|
||||
for _, bypassHost := range p.bypassHosts {
|
||||
if bypassHost == host {
|
||||
return p.bypass
|
||||
}
|
||||
}
|
||||
return p.def
|
||||
}
|
||||
|
||||
// AddFromString parses a string that contains comma-separated values
|
||||
// specifying hosts that should use the bypass proxy. Each value is either an
|
||||
// IP address, a CIDR range, a zone (*.example.com) or a host name
|
||||
// (localhost). A best effort is made to parse the string and errors are
|
||||
// ignored.
|
||||
func (p *proxy_PerHost) AddFromString(s string) {
|
||||
hosts := strings.Split(s, ",")
|
||||
for _, host := range hosts {
|
||||
host = strings.TrimSpace(host)
|
||||
if len(host) == 0 {
|
||||
continue
|
||||
}
|
||||
if strings.Contains(host, "/") {
|
||||
// We assume that it's a CIDR address like 127.0.0.0/8
|
||||
if _, net, err := net.ParseCIDR(host); err == nil {
|
||||
p.AddNetwork(net)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if ip := net.ParseIP(host); ip != nil {
|
||||
p.AddIP(ip)
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(host, "*.") {
|
||||
p.AddZone(host[1:])
|
||||
continue
|
||||
}
|
||||
p.AddHost(host)
|
||||
}
|
||||
}
|
||||
|
||||
// AddIP specifies an IP address that will use the bypass proxy. Note that
|
||||
// this will only take effect if a literal IP address is dialed. A connection
|
||||
// to a named host will never match an IP.
|
||||
func (p *proxy_PerHost) AddIP(ip net.IP) {
|
||||
p.bypassIPs = append(p.bypassIPs, ip)
|
||||
}
|
||||
|
||||
// AddNetwork specifies an IP range that will use the bypass proxy. Note that
|
||||
// this will only take effect if a literal IP address is dialed. A connection
|
||||
// to a named host will never match.
|
||||
func (p *proxy_PerHost) AddNetwork(net *net.IPNet) {
|
||||
p.bypassNetworks = append(p.bypassNetworks, net)
|
||||
}
|
||||
|
||||
// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of
|
||||
// "example.com" matches "example.com" and all of its subdomains.
|
||||
func (p *proxy_PerHost) AddZone(zone string) {
|
||||
if strings.HasSuffix(zone, ".") {
|
||||
zone = zone[:len(zone)-1]
|
||||
}
|
||||
if !strings.HasPrefix(zone, ".") {
|
||||
zone = "." + zone
|
||||
}
|
||||
p.bypassZones = append(p.bypassZones, zone)
|
||||
}
|
||||
|
||||
// AddHost specifies a host name that will use the bypass proxy.
|
||||
func (p *proxy_PerHost) AddHost(host string) {
|
||||
if strings.HasSuffix(host, ".") {
|
||||
host = host[:len(host)-1]
|
||||
}
|
||||
p.bypassHosts = append(p.bypassHosts, host)
|
||||
}
|
||||
|
||||
// A Dialer is a means to establish a connection.
|
||||
type proxy_Dialer interface {
|
||||
// Dial connects to the given address via the proxy.
|
||||
Dial(network, addr string) (c net.Conn, err error)
|
||||
}
|
||||
|
||||
// Auth contains authentication parameters that specific Dialers may require.
|
||||
type proxy_Auth struct {
|
||||
User, Password string
|
||||
}
|
||||
|
||||
// FromEnvironment returns the dialer specified by the proxy related variables in
|
||||
// the environment.
|
||||
func proxy_FromEnvironment() proxy_Dialer {
|
||||
allProxy := proxy_allProxyEnv.Get()
|
||||
if len(allProxy) == 0 {
|
||||
return proxy_Direct
|
||||
}
|
||||
|
||||
proxyURL, err := url.Parse(allProxy)
|
||||
if err != nil {
|
||||
return proxy_Direct
|
||||
}
|
||||
proxy, err := proxy_FromURL(proxyURL, proxy_Direct)
|
||||
if err != nil {
|
||||
return proxy_Direct
|
||||
}
|
||||
|
||||
noProxy := proxy_noProxyEnv.Get()
|
||||
if len(noProxy) == 0 {
|
||||
return proxy
|
||||
}
|
||||
|
||||
perHost := proxy_NewPerHost(proxy, proxy_Direct)
|
||||
perHost.AddFromString(noProxy)
|
||||
return perHost
|
||||
}
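// A minimal sketch of using the bundled dialer from inside this package (the
// target address is an assumption for the example):
//
//	dialer := proxy_FromEnvironment() // proxy_Direct when ALL_PROXY is unset
//	conn, err := dialer.Dial("tcp", "example.com:443")
//	if err != nil {
//		return err
//	}
//	defer conn.Close()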
|
||||
|
||||
// proxySchemes is a map from URL schemes to a function that creates a Dialer
|
||||
// from a URL with such a scheme.
|
||||
var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)
|
||||
|
||||
// RegisterDialerType takes a URL scheme and a function to generate Dialers from
|
||||
// a URL with that scheme and a forwarding Dialer. Registered schemes are used
|
||||
// by FromURL.
|
||||
func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) {
|
||||
if proxy_proxySchemes == nil {
|
||||
proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error))
|
||||
}
|
||||
proxy_proxySchemes[scheme] = f
|
||||
}
|
||||
|
||||
// FromURL returns a Dialer given a URL specification and an underlying
|
||||
// Dialer for it to make network requests.
|
||||
func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) {
|
||||
var auth *proxy_Auth
|
||||
if u.User != nil {
|
||||
auth = new(proxy_Auth)
|
||||
auth.User = u.User.Username()
|
||||
if p, ok := u.User.Password(); ok {
|
||||
auth.Password = p
|
||||
}
|
||||
}
|
||||
|
||||
switch u.Scheme {
|
||||
case "socks5":
|
||||
return proxy_SOCKS5("tcp", u.Host, auth, forward)
|
||||
}
|
||||
|
||||
// If the scheme doesn't match any of the built-in schemes, see if it
|
||||
// was registered by another package.
|
||||
if proxy_proxySchemes != nil {
|
||||
if f, ok := proxy_proxySchemes[u.Scheme]; ok {
|
||||
return f(u, forward)
|
||||
}
|
||||
}
|
||||
|
||||
return nil, errors.New("proxy: unknown scheme: " + u.Scheme)
|
||||
}
|
||||
|
||||
var (
|
||||
proxy_allProxyEnv = &proxy_envOnce{
|
||||
names: []string{"ALL_PROXY", "all_proxy"},
|
||||
}
|
||||
proxy_noProxyEnv = &proxy_envOnce{
|
||||
names: []string{"NO_PROXY", "no_proxy"},
|
||||
}
|
||||
)
|
||||
|
||||
// envOnce looks up an environment variable (optionally by multiple
|
||||
// names) once. It mitigates expensive lookups on some platforms
|
||||
// (e.g. Windows).
|
||||
// (Borrowed from net/http/transport.go)
|
||||
type proxy_envOnce struct {
|
||||
names []string
|
||||
once sync.Once
|
||||
val string
|
||||
}
|
||||
|
||||
func (e *proxy_envOnce) Get() string {
|
||||
e.once.Do(e.init)
|
||||
return e.val
|
||||
}
|
||||
|
||||
func (e *proxy_envOnce) init() {
|
||||
for _, n := range e.names {
|
||||
e.val = os.Getenv(n)
|
||||
if e.val != "" {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address
|
||||
// with an optional username and password. See RFC 1928 and RFC 1929.
|
||||
func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) {
|
||||
s := &proxy_socks5{
|
||||
network: network,
|
||||
addr: addr,
|
||||
forward: forward,
|
||||
}
|
||||
if auth != nil {
|
||||
s.user = auth.User
|
||||
s.password = auth.Password
|
||||
}
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
type proxy_socks5 struct {
|
||||
user, password string
|
||||
network, addr string
|
||||
forward proxy_Dialer
|
||||
}
|
||||
|
||||
const proxy_socks5Version = 5
|
||||
|
||||
const (
|
||||
proxy_socks5AuthNone = 0
|
||||
proxy_socks5AuthPassword = 2
|
||||
)
|
||||
|
||||
const proxy_socks5Connect = 1
|
||||
|
||||
const (
|
||||
proxy_socks5IP4 = 1
|
||||
proxy_socks5Domain = 3
|
||||
proxy_socks5IP6 = 4
|
||||
)
|
||||
|
||||
var proxy_socks5Errors = []string{
|
||||
"",
|
||||
"general failure",
|
||||
"connection forbidden",
|
||||
"network unreachable",
|
||||
"host unreachable",
|
||||
"connection refused",
|
||||
"TTL expired",
|
||||
"command not supported",
|
||||
"address type not supported",
|
||||
}
|
||||
|
||||
// Dial connects to the address addr on the given network via the SOCKS5 proxy.
|
||||
func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) {
|
||||
switch network {
|
||||
case "tcp", "tcp6", "tcp4":
|
||||
default:
|
||||
return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network)
|
||||
}
|
||||
|
||||
conn, err := s.forward.Dial(s.network, s.addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := s.connect(conn, addr); err != nil {
|
||||
conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
// connect takes an existing connection to a socks5 proxy server,
|
||||
// and commands the server to extend that connection to target,
|
||||
// which must be a canonical address with a host and port.
|
||||
func (s *proxy_socks5) connect(conn net.Conn, target string) error {
|
||||
host, portStr, err := net.SplitHostPort(target)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
port, err := strconv.Atoi(portStr)
|
||||
if err != nil {
|
||||
return errors.New("proxy: failed to parse port number: " + portStr)
|
||||
}
|
||||
if port < 1 || port > 0xffff {
|
||||
return errors.New("proxy: port number out of range: " + portStr)
|
||||
}
|
||||
|
||||
// the size here is just an estimate
|
||||
buf := make([]byte, 0, 6+len(host))
|
||||
|
||||
buf = append(buf, proxy_socks5Version)
|
||||
if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 {
|
||||
buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword)
|
||||
} else {
|
||||
buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone)
|
||||
}
|
||||
|
||||
if _, err := conn.Write(buf); err != nil {
|
||||
return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
if _, err := io.ReadFull(conn, buf[:2]); err != nil {
|
||||
return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
if buf[0] != 5 {
|
||||
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0])))
|
||||
}
|
||||
if buf[1] == 0xff {
|
||||
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication")
|
||||
}
|
||||
|
||||
// See RFC 1929
|
||||
if buf[1] == proxy_socks5AuthPassword {
|
||||
buf = buf[:0]
|
||||
buf = append(buf, 1 /* password protocol version */)
|
||||
buf = append(buf, uint8(len(s.user)))
|
||||
buf = append(buf, s.user...)
|
||||
buf = append(buf, uint8(len(s.password)))
|
||||
buf = append(buf, s.password...)
|
||||
|
||||
if _, err := conn.Write(buf); err != nil {
|
||||
return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
if _, err := io.ReadFull(conn, buf[:2]); err != nil {
|
||||
return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
if buf[1] != 0 {
|
||||
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password")
|
||||
}
|
||||
}
|
||||
|
||||
buf = buf[:0]
|
||||
buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */)
|
||||
|
||||
if ip := net.ParseIP(host); ip != nil {
|
||||
if ip4 := ip.To4(); ip4 != nil {
|
||||
buf = append(buf, proxy_socks5IP4)
|
||||
ip = ip4
|
||||
} else {
|
||||
buf = append(buf, proxy_socks5IP6)
|
||||
}
|
||||
buf = append(buf, ip...)
|
||||
} else {
|
||||
if len(host) > 255 {
|
||||
return errors.New("proxy: destination host name too long: " + host)
|
||||
}
|
||||
buf = append(buf, proxy_socks5Domain)
|
||||
buf = append(buf, byte(len(host)))
|
||||
buf = append(buf, host...)
|
||||
}
|
||||
buf = append(buf, byte(port>>8), byte(port))
|
||||
|
||||
if _, err := conn.Write(buf); err != nil {
|
||||
return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
if _, err := io.ReadFull(conn, buf[:4]); err != nil {
|
||||
return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
failure := "unknown error"
|
||||
if int(buf[1]) < len(proxy_socks5Errors) {
|
||||
failure = proxy_socks5Errors[buf[1]]
|
||||
}
|
||||
|
||||
if len(failure) > 0 {
|
||||
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure)
|
||||
}
|
||||
|
||||
bytesToDiscard := 0
|
||||
switch buf[3] {
|
||||
case proxy_socks5IP4:
|
||||
bytesToDiscard = net.IPv4len
|
||||
case proxy_socks5IP6:
|
||||
bytesToDiscard = net.IPv6len
|
||||
case proxy_socks5Domain:
|
||||
_, err := io.ReadFull(conn, buf[:1])
|
||||
if err != nil {
|
||||
return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
bytesToDiscard = int(buf[0])
|
||||
default:
|
||||
return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr)
|
||||
}
|
||||
|
||||
if cap(buf) < bytesToDiscard {
|
||||
buf = make([]byte, bytesToDiscard)
|
||||
} else {
|
||||
buf = buf[:bytesToDiscard]
|
||||
}
|
||||
if _, err := io.ReadFull(conn, buf); err != nil {
|
||||
return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
// Also need to discard the port number
|
||||
if _, err := io.ReadFull(conn, buf[:2]); err != nil {
|
||||
return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
23
vendor/github.com/hashicorp/golang-lru/.gitignore
generated
vendored
Normal file
@ -0,0 +1,23 @@
|
||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
*.test
|
223
vendor/github.com/hashicorp/golang-lru/2q.go
generated
vendored
Normal file
@ -0,0 +1,223 @@
|
||||
package lru
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/hashicorp/golang-lru/simplelru"
|
||||
)
|
||||
|
||||
const (
|
||||
// Default2QRecentRatio is the ratio of the 2Q cache dedicated
|
||||
// to recently added entries that have only been accessed once.
|
||||
Default2QRecentRatio = 0.25
|
||||
|
||||
// Default2QGhostEntries is the default ratio of ghost
|
||||
// entries kept to track entries recently evicted.
|
||||
Default2QGhostEntries = 0.50
|
||||
)
|
||||
|
||||
// TwoQueueCache is a thread-safe fixed size 2Q cache.
|
||||
// 2Q is an enhancement over the standard LRU cache
|
||||
// in that it tracks both frequently and recently used
|
||||
// entries separately. This avoids a burst in access to new
|
||||
// entries from evicting frequently used entries. It adds some
|
||||
// additional tracking overhead to the standard LRU cache, and is
|
||||
// computationally about 2x the cost, and adds some metadata overhead.
|
||||
// The ARCCache is similar, but does not require setting any
|
||||
// parameters.
|
||||
type TwoQueueCache struct {
|
||||
size int
|
||||
recentSize int
|
||||
|
||||
recent simplelru.LRUCache
|
||||
frequent simplelru.LRUCache
|
||||
recentEvict simplelru.LRUCache
|
||||
lock sync.RWMutex
|
||||
}
|
||||
|
||||
// New2Q creates a new TwoQueueCache using the default
|
||||
// values for the parameters.
|
||||
func New2Q(size int) (*TwoQueueCache, error) {
|
||||
return New2QParams(size, Default2QRecentRatio, Default2QGhostEntries)
|
||||
}
|
||||
|
||||
// New2QParams creates a new TwoQueueCache using the provided
|
||||
// parameter values.
|
||||
func New2QParams(size int, recentRatio float64, ghostRatio float64) (*TwoQueueCache, error) {
|
||||
if size <= 0 {
|
||||
return nil, fmt.Errorf("invalid size")
|
||||
}
|
||||
if recentRatio < 0.0 || recentRatio > 1.0 {
|
||||
return nil, fmt.Errorf("invalid recent ratio")
|
||||
}
|
||||
if ghostRatio < 0.0 || ghostRatio > 1.0 {
|
||||
return nil, fmt.Errorf("invalid ghost ratio")
|
||||
}
|
||||
|
||||
// Determine the sub-sizes
|
||||
recentSize := int(float64(size) * recentRatio)
|
||||
evictSize := int(float64(size) * ghostRatio)
|
||||
|
||||
// Allocate the LRUs
|
||||
recent, err := simplelru.NewLRU(size, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
frequent, err := simplelru.NewLRU(size, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
recentEvict, err := simplelru.NewLRU(evictSize, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Initialize the cache
|
||||
c := &TwoQueueCache{
|
||||
size: size,
|
||||
recentSize: recentSize,
|
||||
recent: recent,
|
||||
frequent: frequent,
|
||||
recentEvict: recentEvict,
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Get looks up a key's value from the cache.
|
||||
func (c *TwoQueueCache) Get(key interface{}) (value interface{}, ok bool) {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
// Check if this is a frequent value
|
||||
if val, ok := c.frequent.Get(key); ok {
|
||||
return val, ok
|
||||
}
|
||||
|
||||
// If the value is contained in recent, then we
|
||||
// promote it to frequent
|
||||
if val, ok := c.recent.Peek(key); ok {
|
||||
c.recent.Remove(key)
|
||||
c.frequent.Add(key, val)
|
||||
return val, ok
|
||||
}
|
||||
|
||||
// No hit
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// Add adds a value to the cache.
|
||||
func (c *TwoQueueCache) Add(key, value interface{}) {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
// Check if the value is frequently used already,
|
||||
// and just update the value
|
||||
if c.frequent.Contains(key) {
|
||||
c.frequent.Add(key, value)
|
||||
return
|
||||
}
|
||||
|
||||
// Check if the value is recently used, and promote
|
||||
// the value into the frequent list
|
||||
if c.recent.Contains(key) {
|
||||
c.recent.Remove(key)
|
||||
c.frequent.Add(key, value)
|
||||
return
|
||||
}
|
||||
|
||||
// If the value was recently evicted, add it to the
|
||||
// frequently used list
|
||||
if c.recentEvict.Contains(key) {
|
||||
c.ensureSpace(true)
|
||||
c.recentEvict.Remove(key)
|
||||
c.frequent.Add(key, value)
|
||||
return
|
||||
}
|
||||
|
||||
// Add to the recently seen list
|
||||
c.ensureSpace(false)
|
||||
c.recent.Add(key, value)
|
||||
return
|
||||
}
|
||||
|
||||
// ensureSpace is used to ensure we have space in the cache
|
||||
func (c *TwoQueueCache) ensureSpace(recentEvict bool) {
|
||||
// If we have space, nothing to do
|
||||
recentLen := c.recent.Len()
|
||||
freqLen := c.frequent.Len()
|
||||
if recentLen+freqLen < c.size {
|
||||
return
|
||||
}
|
||||
|
||||
// If the recent buffer is larger than
|
||||
// the target, evict from there
|
||||
if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) {
|
||||
k, _, _ := c.recent.RemoveOldest()
|
||||
c.recentEvict.Add(k, nil)
|
||||
return
|
||||
}
|
||||
|
||||
// Remove from the frequent list otherwise
|
||||
c.frequent.RemoveOldest()
|
||||
}
|
||||
|
||||
// Len returns the number of items in the cache.
|
||||
func (c *TwoQueueCache) Len() int {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
return c.recent.Len() + c.frequent.Len()
|
||||
}
|
||||
|
||||
// Keys returns a slice of the keys in the cache.
|
||||
// The frequently used keys are first in the returned slice.
|
||||
func (c *TwoQueueCache) Keys() []interface{} {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
k1 := c.frequent.Keys()
|
||||
k2 := c.recent.Keys()
|
||||
return append(k1, k2...)
|
||||
}
|
||||
|
||||
// Remove removes the provided key from the cache.
|
||||
func (c *TwoQueueCache) Remove(key interface{}) {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
if c.frequent.Remove(key) {
|
||||
return
|
||||
}
|
||||
if c.recent.Remove(key) {
|
||||
return
|
||||
}
|
||||
if c.recentEvict.Remove(key) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Purge is used to completely clear the cache.
|
||||
func (c *TwoQueueCache) Purge() {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
c.recent.Purge()
|
||||
c.frequent.Purge()
|
||||
c.recentEvict.Purge()
|
||||
}
|
||||
|
||||
// Contains is used to check if the cache contains a key
|
||||
// without updating recency or frequency.
|
||||
func (c *TwoQueueCache) Contains(key interface{}) bool {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
return c.frequent.Contains(key) || c.recent.Contains(key)
|
||||
}
|
||||
|
||||
// Peek is used to inspect the cache value of a key
|
||||
// without updating recency or frequency.
|
||||
func (c *TwoQueueCache) Peek(key interface{}) (value interface{}, ok bool) {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
if val, ok := c.frequent.Peek(key); ok {
|
||||
return val, ok
|
||||
}
|
||||
return c.recent.Peek(key)
|
||||
}
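// A minimal sketch of the exported API (assuming the package is imported with
// the alias lru for github.com/hashicorp/golang-lru; key and value are
// example data):
//
//	cache, err := lru.New2Q(128)
//	if err != nil {
//		return err
//	}
//	cache.Add("alpha", 1)
//	if v, ok := cache.Get("alpha"); ok {
//		fmt.Println(v) // 1
//	}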
|
362
vendor/github.com/hashicorp/golang-lru/LICENSE
generated
vendored
Normal file
@ -0,0 +1,362 @@
|
||||
Mozilla Public License, version 2.0
|
||||
|
||||
1. Definitions
|
||||
|
||||
1.1. "Contributor"
|
||||
|
||||
means each individual or legal entity that creates, contributes to the
|
||||
creation of, or owns Covered Software.
|
||||
|
||||
1.2. "Contributor Version"
|
||||
|
||||
means the combination of the Contributions of others (if any) used by a
|
||||
Contributor and that particular Contributor's Contribution.
|
||||
|
||||
1.3. "Contribution"
|
||||
|
||||
means Covered Software of a particular Contributor.
|
||||
|
||||
1.4. "Covered Software"
|
||||
|
||||
means Source Code Form to which the initial Contributor has attached the
|
||||
notice in Exhibit A, the Executable Form of such Source Code Form, and
|
||||
Modifications of such Source Code Form, in each case including portions
|
||||
thereof.
|
||||
|
||||
1.5. "Incompatible With Secondary Licenses"
|
||||
means
|
||||
|
||||
a. that the initial Contributor has attached the notice described in
|
||||
Exhibit B to the Covered Software; or
|
||||
|
||||
b. that the Covered Software was made available under the terms of
|
||||
version 1.1 or earlier of the License, but not also under the terms of
|
||||
a Secondary License.
|
||||
|
||||
1.6. "Executable Form"
|
||||
|
||||
means any form of the work other than Source Code Form.
|
||||
|
||||
1.7. "Larger Work"
|
||||
|
||||
means a work that combines Covered Software with other material, in a
|
||||
separate file or files, that is not Covered Software.
|
||||
|
||||
1.8. "License"
|
||||
|
||||
means this document.
|
||||
|
||||
1.9. "Licensable"
|
||||
|
||||
means having the right to grant, to the maximum extent possible, whether
|
||||
at the time of the initial grant or subsequently, any and all of the
|
||||
rights conveyed by this License.
|
||||
|
||||
1.10. "Modifications"
|
||||
|
||||
means any of the following:
|
||||
|
||||
a. any file in Source Code Form that results from an addition to,
|
||||
deletion from, or modification of the contents of Covered Software; or
|
||||
|
||||
b. any new file in Source Code Form that contains any Covered Software.
|
||||
|
||||
1.11. "Patent Claims" of a Contributor
|
||||
|
||||
means any patent claim(s), including without limitation, method,
|
||||
process, and apparatus claims, in any patent Licensable by such
|
||||
Contributor that would be infringed, but for the grant of the License,
|
||||
by the making, using, selling, offering for sale, having made, import,
|
||||
or transfer of either its Contributions or its Contributor Version.
|
||||
|
||||
1.12. "Secondary License"
|
||||
|
||||
means either the GNU General Public License, Version 2.0, the GNU Lesser
|
||||
General Public License, Version 2.1, the GNU Affero General Public
|
||||
License, Version 3.0, or any later versions of those licenses.
|
||||
|
||||
1.13. "Source Code Form"
|
||||
|
||||
means the form of the work preferred for making modifications.
|
||||
|
||||
1.14. "You" (or "Your")
|
||||
|
||||
means an individual or a legal entity exercising rights under this
|
||||
License. For legal entities, "You" includes any entity that controls, is
|
||||
controlled by, or is under common control with You. For purposes of this
|
||||
definition, "control" means (a) the power, direct or indirect, to cause
|
||||
the direction or management of such entity, whether by contract or
|
||||
otherwise, or (b) ownership of more than fifty percent (50%) of the
|
||||
outstanding shares or beneficial ownership of such entity.
|
||||
|
||||
|
||||
2. License Grants and Conditions
|
||||
|
||||
2.1. Grants
|
||||
|
||||
Each Contributor hereby grants You a world-wide, royalty-free,
|
||||
non-exclusive license:
|
||||
|
||||
a. under intellectual property rights (other than patent or trademark)
|
||||
Licensable by such Contributor to use, reproduce, make available,
|
||||
modify, display, perform, distribute, and otherwise exploit its
|
||||
Contributions, either on an unmodified basis, with Modifications, or
|
||||
as part of a Larger Work; and
|
||||
|
||||
b. under Patent Claims of such Contributor to make, use, sell, offer for
|
||||
sale, have made, import, and otherwise transfer either its
|
||||
Contributions or its Contributor Version.
|
||||
|
||||
2.2. Effective Date
|
||||
|
||||
The licenses granted in Section 2.1 with respect to any Contribution
|
||||
become effective for each Contribution on the date the Contributor first
|
||||
distributes such Contribution.
|
||||
|
||||
2.3. Limitations on Grant Scope
|
||||
|
||||
The licenses granted in this Section 2 are the only rights granted under
|
||||
this License. No additional rights or licenses will be implied from the
|
||||
distribution or licensing of Covered Software under this License.
|
||||
Notwithstanding Section 2.1(b) above, no patent license is granted by a
|
||||
Contributor:
|
||||
|
||||
a. for any code that a Contributor has removed from Covered Software; or
|
||||
|
||||
b. for infringements caused by: (i) Your and any other third party's
|
||||
modifications of Covered Software, or (ii) the combination of its
|
||||
Contributions with other software (except as part of its Contributor
|
||||
Version); or
|
||||
|
||||
c. under Patent Claims infringed by Covered Software in the absence of
|
||||
its Contributions.
|
||||
|
||||
This License does not grant any rights in the trademarks, service marks,
|
||||
or logos of any Contributor (except as may be necessary to comply with
|
||||
the notice requirements in Section 3.4).
|
||||
|
||||
2.4. Subsequent Licenses
|
||||
|
||||
No Contributor makes additional grants as a result of Your choice to
|
||||
distribute the Covered Software under a subsequent version of this
|
||||
License (see Section 10.2) or under the terms of a Secondary License (if
|
||||
permitted under the terms of Section 3.3).
|
||||
|
||||
2.5. Representation
|
||||
|
||||
Each Contributor represents that the Contributor believes its
|
||||
Contributions are its original creation(s) or it has sufficient rights to
|
||||
grant the rights to its Contributions conveyed by this License.
|
||||
|
||||
2.6. Fair Use
|
||||
|
||||
This License is not intended to limit any rights You have under
|
||||
applicable copyright doctrines of fair use, fair dealing, or other
|
||||
equivalents.
|
||||
|
||||
2.7. Conditions
|
||||
|
||||
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
|
||||
Section 2.1.
|
||||
|
||||
|
||||
3. Responsibilities
|
||||
|
||||
3.1. Distribution of Source Form
|
||||
|
||||
All distribution of Covered Software in Source Code Form, including any
|
||||
Modifications that You create or to which You contribute, must be under
|
||||
the terms of this License. You must inform recipients that the Source
|
||||
Code Form of the Covered Software is governed by the terms of this
|
||||
License, and how they can obtain a copy of this License. You may not
|
||||
attempt to alter or restrict the recipients' rights in the Source Code
|
||||
Form.
|
||||
|
||||
3.2. Distribution of Executable Form
|
||||
|
||||
If You distribute Covered Software in Executable Form then:
|
||||
|
||||
a. such Covered Software must also be made available in Source Code Form,
|
||||
as described in Section 3.1, and You must inform recipients of the
|
||||
Executable Form how they can obtain a copy of such Source Code Form by
|
||||
reasonable means in a timely manner, at a charge no more than the cost
|
||||
of distribution to the recipient; and
|
||||
|
||||
b. You may distribute such Executable Form under the terms of this
|
||||
License, or sublicense it under different terms, provided that the
|
||||
license for the Executable Form does not attempt to limit or alter the
|
||||
recipients' rights in the Source Code Form under this License.
|
||||
|
||||
3.3. Distribution of a Larger Work
|
||||
|
||||
You may create and distribute a Larger Work under terms of Your choice,
|
||||
provided that You also comply with the requirements of this License for
|
||||
the Covered Software. If the Larger Work is a combination of Covered
|
||||
Software with a work governed by one or more Secondary Licenses, and the
|
||||
Covered Software is not Incompatible With Secondary Licenses, this
|
||||
License permits You to additionally distribute such Covered Software
|
||||
under the terms of such Secondary License(s), so that the recipient of
|
||||
the Larger Work may, at their option, further distribute the Covered
|
||||
Software under the terms of either this License or such Secondary
|
||||
License(s).
|
||||
|
||||
3.4. Notices
|
||||
|
||||
You may not remove or alter the substance of any license notices
|
||||
(including copyright notices, patent notices, disclaimers of warranty, or
|
||||
limitations of liability) contained within the Source Code Form of the
|
||||
Covered Software, except that You may alter any license notices to the
|
||||
extent required to remedy known factual inaccuracies.
|
||||
|
||||
3.5. Application of Additional Terms
|
||||
|
||||
You may choose to offer, and to charge a fee for, warranty, support,
|
||||
indemnity or liability obligations to one or more recipients of Covered
|
||||
Software. However, You may do so only on Your own behalf, and not on
|
||||
behalf of any Contributor. You must make it absolutely clear that any
|
||||
such warranty, support, indemnity, or liability obligation is offered by
|
||||
You alone, and You hereby agree to indemnify every Contributor for any
|
||||
liability incurred by such Contributor as a result of warranty, support,
|
||||
indemnity or liability terms You offer. You may include additional
|
||||
disclaimers of warranty and limitations of liability specific to any
|
||||
jurisdiction.
|
||||
|
||||
4. Inability to Comply Due to Statute or Regulation
|
||||
|
||||
If it is impossible for You to comply with any of the terms of this License
|
||||
with respect to some or all of the Covered Software due to statute,
|
||||
judicial order, or regulation then You must: (a) comply with the terms of
|
||||
this License to the maximum extent possible; and (b) describe the
|
||||
limitations and the code they affect. Such description must be placed in a
|
||||
text file included with all distributions of the Covered Software under
|
||||
this License. Except to the extent prohibited by statute or regulation,
such description must be sufficiently detailed for a recipient of ordinary
skill to be able to understand it.

5. Termination

5.1. The rights granted under this License will terminate automatically if You
     fail to comply with any of its terms. However, if You become compliant,
     then the rights granted under this License from a particular Contributor
     are reinstated (a) provisionally, unless and until such Contributor
     explicitly and finally terminates Your grants, and (b) on an ongoing
     basis, if such Contributor fails to notify You of the non-compliance by
     some reasonable means prior to 60 days after You have come back into
     compliance. Moreover, Your grants from a particular Contributor are
     reinstated on an ongoing basis if such Contributor notifies You of the
     non-compliance by some reasonable means, this is the first time You have
     received notice of non-compliance with this License from such
     Contributor, and You become compliant prior to 30 days after Your receipt
     of the notice.

5.2. If You initiate litigation against any entity by asserting a patent
     infringement claim (excluding declaratory judgment actions,
     counter-claims, and cross-claims) alleging that a Contributor Version
     directly or indirectly infringes any patent, then the rights granted to
     You by any and all Contributors for the Covered Software under Section
     2.1 of this License shall terminate.

5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
     license agreements (excluding distributors and resellers) which have been
     validly granted by You or Your distributors under this License prior to
     termination shall survive termination.

6. Disclaimer of Warranty

   Covered Software is provided under this License on an "as is" basis,
   without warranty of any kind, either expressed, implied, or statutory,
   including, without limitation, warranties that the Covered Software is free
   of defects, merchantable, fit for a particular purpose or non-infringing.
   The entire risk as to the quality and performance of the Covered Software
   is with You. Should any Covered Software prove defective in any respect,
   You (not any Contributor) assume the cost of any necessary servicing,
   repair, or correction. This disclaimer of warranty constitutes an essential
   part of this License. No use of any Covered Software is authorized under
   this License except under this disclaimer.

7. Limitation of Liability

   Under no circumstances and under no legal theory, whether tort (including
   negligence), contract, or otherwise, shall any Contributor, or anyone who
   distributes Covered Software as permitted above, be liable to You for any
   direct, indirect, special, incidental, or consequential damages of any
   character including, without limitation, damages for lost profits, loss of
   goodwill, work stoppage, computer failure or malfunction, or any and all
   other commercial damages or losses, even if such party shall have been
   informed of the possibility of such damages. This limitation of liability
   shall not apply to liability for death or personal injury resulting from
   such party's negligence to the extent applicable law prohibits such
   limitation. Some jurisdictions do not allow the exclusion or limitation of
   incidental or consequential damages, so this exclusion and limitation may
   not apply to You.

8. Litigation

   Any litigation relating to this License may be brought only in the courts
   of a jurisdiction where the defendant maintains its principal place of
   business and such litigation shall be governed by laws of that
   jurisdiction, without reference to its conflict-of-law provisions. Nothing
   in this Section shall prevent a party's ability to bring cross-claims or
   counter-claims.

9. Miscellaneous

   This License represents the complete agreement concerning the subject
   matter hereof. If any provision of this License is held to be
   unenforceable, such provision shall be reformed only to the extent
   necessary to make it enforceable. Any law or regulation which provides that
   the language of a contract shall be construed against the drafter shall not
   be used to construe this License against a Contributor.


10. Versions of the License

10.1. New Versions

      Mozilla Foundation is the license steward. Except as provided in Section
      10.3, no one other than the license steward has the right to modify or
      publish new versions of this License. Each version will be given a
      distinguishing version number.

10.2. Effect of New Versions

      You may distribute the Covered Software under the terms of the version
      of the License under which You originally received the Covered Software,
      or under the terms of any subsequent version published by the license
      steward.

10.3. Modified Versions

      If you create software not governed by this License, and you want to
      create a new license for such software, you may create and use a
      modified version of this License if you rename the license and remove
      any references to the name of the license steward (except to note that
      such modified license differs from this License).

10.4. Distributing Source Code Form that is Incompatible With Secondary
      Licenses If You choose to distribute Source Code Form that is
      Incompatible With Secondary Licenses under the terms of this version of
      the License, the notice described in Exhibit B of this License must be
      attached.

Exhibit A - Source Code Form License Notice

      This Source Code Form is subject to the
      terms of the Mozilla Public License, v.
      2.0. If a copy of the MPL was not
      distributed with this file, You can
      obtain one at
      http://mozilla.org/MPL/2.0/.

If it is not possible or desirable to put the notice in a particular file,
then You may include the notice in a location (such as a LICENSE file in a
relevant directory) where a recipient would be likely to look for such a
notice.

You may add additional accurate notices of copyright ownership.

Exhibit B - "Incompatible With Secondary Licenses" Notice

      This Source Code Form is "Incompatible
      With Secondary Licenses", as defined by
      the Mozilla Public License, v. 2.0.
25
vendor/github.com/hashicorp/golang-lru/README.md
generated
vendored
Normal file
@ -0,0 +1,25 @@
golang-lru
==========

This provides the `lru` package which implements a fixed-size
thread safe LRU cache. It is based on the cache in Groupcache.

Documentation
=============

Full docs are available on [Godoc](http://godoc.org/github.com/hashicorp/golang-lru)

Example
=======

Using the LRU is very simple:

```go
l, _ := New(128)
for i := 0; i < 256; i++ {
    l.Add(i, nil)
}
if l.Len() != 128 {
    panic(fmt.Sprintf("bad len: %v", l.Len()))
}
```
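The README shows only the basic constructor; the vendored `lru.go` further down in this diff also exposes `NewWithEvict`. A minimal, hedged sketch of using it with an eviction callback (import path taken from this vendor tree, keys and sizes arbitrary):

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	// The callback fires for every entry pushed out of the fixed-size cache.
	cache, err := lru.NewWithEvict(2, func(key, value interface{}) {
		fmt.Printf("evicted %v=%v\n", key, value)
	})
	if err != nil {
		panic(err)
	}
	cache.Add("a", 1)
	cache.Add("b", 2)
	cache.Add("c", 3) // evicts "a", the least recently used entry
	fmt.Println(cache.Len()) // 2
}
```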
257
vendor/github.com/hashicorp/golang-lru/arc.go
generated
vendored
Normal file
@ -0,0 +1,257 @@
|
||||
package lru
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/hashicorp/golang-lru/simplelru"
|
||||
)
|
||||
|
||||
// ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC).
|
||||
// ARC is an enhancement over the standard LRU cache in that tracks both
|
||||
// frequency and recency of use. This avoids a burst in access to new
|
||||
// entries from evicting the frequently used older entries. It adds some
|
||||
// additional tracking overhead to a standard LRU cache, computationally
|
||||
// it is roughly 2x the cost, and the extra memory overhead is linear
|
||||
// with the size of the cache. ARC has been patented by IBM, but is
|
||||
// similar to the TwoQueueCache (2Q) which requires setting parameters.
|
||||
type ARCCache struct {
|
||||
size int // Size is the total capacity of the cache
|
||||
p int // P is the dynamic preference towards T1 or T2
|
||||
|
||||
t1 simplelru.LRUCache // T1 is the LRU for recently accessed items
|
||||
b1 simplelru.LRUCache // B1 is the LRU for evictions from t1
|
||||
|
||||
t2 simplelru.LRUCache // T2 is the LRU for frequently accessed items
|
||||
b2 simplelru.LRUCache // B2 is the LRU for evictions from t2
|
||||
|
||||
lock sync.RWMutex
|
||||
}
|
||||
|
||||
// NewARC creates an ARC of the given size
|
||||
func NewARC(size int) (*ARCCache, error) {
|
||||
// Create the sub LRUs
|
||||
b1, err := simplelru.NewLRU(size, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b2, err := simplelru.NewLRU(size, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
t1, err := simplelru.NewLRU(size, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
t2, err := simplelru.NewLRU(size, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Initialize the ARC
|
||||
c := &ARCCache{
|
||||
size: size,
|
||||
p: 0,
|
||||
t1: t1,
|
||||
b1: b1,
|
||||
t2: t2,
|
||||
b2: b2,
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Get looks up a key's value from the cache.
|
||||
func (c *ARCCache) Get(key interface{}) (value interface{}, ok bool) {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
// If the value is contained in T1 (recent), then
|
||||
// promote it to T2 (frequent)
|
||||
if val, ok := c.t1.Peek(key); ok {
|
||||
c.t1.Remove(key)
|
||||
c.t2.Add(key, val)
|
||||
return val, ok
|
||||
}
|
||||
|
||||
// Check if the value is contained in T2 (frequent)
|
||||
if val, ok := c.t2.Get(key); ok {
|
||||
return val, ok
|
||||
}
|
||||
|
||||
// No hit
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// Add adds a value to the cache.
|
||||
func (c *ARCCache) Add(key, value interface{}) {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
// Check if the value is contained in T1 (recent), and potentially
|
||||
// promote it to frequent T2
|
||||
if c.t1.Contains(key) {
|
||||
c.t1.Remove(key)
|
||||
c.t2.Add(key, value)
|
||||
return
|
||||
}
|
||||
|
||||
// Check if the value is already in T2 (frequent) and update it
|
||||
if c.t2.Contains(key) {
|
||||
c.t2.Add(key, value)
|
||||
return
|
||||
}
|
||||
|
||||
// Check if this value was recently evicted as part of the
|
||||
// recently used list
|
||||
if c.b1.Contains(key) {
|
||||
// T1 set is too small, increase P appropriately
|
||||
delta := 1
|
||||
b1Len := c.b1.Len()
|
||||
b2Len := c.b2.Len()
|
||||
if b2Len > b1Len {
|
||||
delta = b2Len / b1Len
|
||||
}
|
||||
if c.p+delta >= c.size {
|
||||
c.p = c.size
|
||||
} else {
|
||||
c.p += delta
|
||||
}
|
||||
|
||||
// Potentially need to make room in the cache
|
||||
if c.t1.Len()+c.t2.Len() >= c.size {
|
||||
c.replace(false)
|
||||
}
|
||||
|
||||
// Remove from B1
|
||||
c.b1.Remove(key)
|
||||
|
||||
// Add the key to the frequently used list
|
||||
c.t2.Add(key, value)
|
||||
return
|
||||
}
|
||||
|
||||
// Check if this value was recently evicted as part of the
|
||||
// frequently used list
|
||||
if c.b2.Contains(key) {
|
||||
// T2 set is too small, decrease P appropriately
|
||||
delta := 1
|
||||
b1Len := c.b1.Len()
|
||||
b2Len := c.b2.Len()
|
||||
if b1Len > b2Len {
|
||||
delta = b1Len / b2Len
|
||||
}
|
||||
if delta >= c.p {
|
||||
c.p = 0
|
||||
} else {
|
||||
c.p -= delta
|
||||
}
|
||||
|
||||
// Potentially need to make room in the cache
|
||||
if c.t1.Len()+c.t2.Len() >= c.size {
|
||||
c.replace(true)
|
||||
}
|
||||
|
||||
// Remove from B2
|
||||
c.b2.Remove(key)
|
||||
|
||||
// Add the key to the frequently used list
|
||||
c.t2.Add(key, value)
|
||||
return
|
||||
}
|
||||
|
||||
// Potentially need to make room in the cache
|
||||
if c.t1.Len()+c.t2.Len() >= c.size {
|
||||
c.replace(false)
|
||||
}
|
||||
|
||||
// Keep the size of the ghost buffers trim
|
||||
if c.b1.Len() > c.size-c.p {
|
||||
c.b1.RemoveOldest()
|
||||
}
|
||||
if c.b2.Len() > c.p {
|
||||
c.b2.RemoveOldest()
|
||||
}
|
||||
|
||||
// Add to the recently seen list
|
||||
c.t1.Add(key, value)
|
||||
return
|
||||
}
|
||||
|
||||
// replace is used to adaptively evict from either T1 or T2
|
||||
// based on the current learned value of P
|
||||
func (c *ARCCache) replace(b2ContainsKey bool) {
|
||||
t1Len := c.t1.Len()
|
||||
if t1Len > 0 && (t1Len > c.p || (t1Len == c.p && b2ContainsKey)) {
|
||||
k, _, ok := c.t1.RemoveOldest()
|
||||
if ok {
|
||||
c.b1.Add(k, nil)
|
||||
}
|
||||
} else {
|
||||
k, _, ok := c.t2.RemoveOldest()
|
||||
if ok {
|
||||
c.b2.Add(k, nil)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Len returns the number of cached entries
|
||||
func (c *ARCCache) Len() int {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
return c.t1.Len() + c.t2.Len()
|
||||
}
|
||||
|
||||
// Keys returns all the cached keys
|
||||
func (c *ARCCache) Keys() []interface{} {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
k1 := c.t1.Keys()
|
||||
k2 := c.t2.Keys()
|
||||
return append(k1, k2...)
|
||||
}
|
||||
|
||||
// Remove is used to purge a key from the cache
|
||||
func (c *ARCCache) Remove(key interface{}) {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
if c.t1.Remove(key) {
|
||||
return
|
||||
}
|
||||
if c.t2.Remove(key) {
|
||||
return
|
||||
}
|
||||
if c.b1.Remove(key) {
|
||||
return
|
||||
}
|
||||
if c.b2.Remove(key) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Purge is used to clear the cache
|
||||
func (c *ARCCache) Purge() {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
c.t1.Purge()
|
||||
c.t2.Purge()
|
||||
c.b1.Purge()
|
||||
c.b2.Purge()
|
||||
}
|
||||
|
||||
// Contains is used to check if the cache contains a key
|
||||
// without updating recency or frequency.
|
||||
func (c *ARCCache) Contains(key interface{}) bool {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
return c.t1.Contains(key) || c.t2.Contains(key)
|
||||
}
|
||||
|
||||
// Peek is used to inspect the cache value of a key
|
||||
// without updating recency or frequency.
|
||||
func (c *ARCCache) Peek(key interface{}) (value interface{}, ok bool) {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
if val, ok := c.t1.Peek(key); ok {
|
||||
return val, ok
|
||||
}
|
||||
return c.t2.Peek(key)
|
||||
}
|
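The doc comment at the top of `arc.go` explains the recency/frequency split that ARC maintains; a hedged usage sketch of the API it exposes (sizes and keys here are arbitrary, not from the vendored code):

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	// NewARC builds four internal LRUs: t1/t2 plus the b1/b2 ghost lists.
	arc, err := lru.NewARC(128)
	if err != nil {
		panic(err)
	}
	for i := 0; i < 256; i++ {
		arc.Add(i, i*i)
	}
	// A Get promotes an entry from the recent list (t1) to the frequent
	// list (t2), protecting it from bursts of new keys.
	if v, ok := arc.Get(200); ok {
		fmt.Println("hit:", v)
	}
	fmt.Println("len:", arc.Len()) // never exceeds 128
}
```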
21
vendor/github.com/hashicorp/golang-lru/doc.go
generated
vendored
Normal file
@ -0,0 +1,21 @@
// Package lru provides three different LRU caches of varying sophistication.
//
// Cache is a simple LRU cache. It is based on the
// LRU implementation in groupcache:
// https://github.com/golang/groupcache/tree/master/lru
//
// TwoQueueCache tracks frequently used and recently used entries separately.
// This avoids a burst of accesses from taking out frequently used entries,
// at the cost of about 2x computational overhead and some extra bookkeeping.
//
// ARCCache is an adaptive replacement cache. It tracks recent evictions as
// well as recent usage in both the frequent and recent caches. Its
// computational overhead is comparable to TwoQueueCache, but the memory
// overhead is linear with the size of the cache.
//
// ARC has been patented by IBM, so do not use it if that is problematic for
// your program.
//
// All caches in this package take locks while operating, and are therefore
// thread-safe for consumers.
package lru
3
vendor/github.com/hashicorp/golang-lru/go.mod
generated
vendored
Normal file
@ -0,0 +1,3 @@
module github.com/hashicorp/golang-lru

go 1.12
150
vendor/github.com/hashicorp/golang-lru/lru.go
generated
vendored
Normal file
@ -0,0 +1,150 @@
|
||||
package lru
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/hashicorp/golang-lru/simplelru"
|
||||
)
|
||||
|
||||
// Cache is a thread-safe fixed size LRU cache.
|
||||
type Cache struct {
|
||||
lru simplelru.LRUCache
|
||||
lock sync.RWMutex
|
||||
}
|
||||
|
||||
// New creates an LRU of the given size.
|
||||
func New(size int) (*Cache, error) {
|
||||
return NewWithEvict(size, nil)
|
||||
}
|
||||
|
||||
// NewWithEvict constructs a fixed size cache with the given eviction
|
||||
// callback.
|
||||
func NewWithEvict(size int, onEvicted func(key interface{}, value interface{})) (*Cache, error) {
|
||||
lru, err := simplelru.NewLRU(size, simplelru.EvictCallback(onEvicted))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c := &Cache{
|
||||
lru: lru,
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Purge is used to completely clear the cache.
|
||||
func (c *Cache) Purge() {
|
||||
c.lock.Lock()
|
||||
c.lru.Purge()
|
||||
c.lock.Unlock()
|
||||
}
|
||||
|
||||
// Add adds a value to the cache. Returns true if an eviction occurred.
|
||||
func (c *Cache) Add(key, value interface{}) (evicted bool) {
|
||||
c.lock.Lock()
|
||||
evicted = c.lru.Add(key, value)
|
||||
c.lock.Unlock()
|
||||
return evicted
|
||||
}
|
||||
|
||||
// Get looks up a key's value from the cache.
|
||||
func (c *Cache) Get(key interface{}) (value interface{}, ok bool) {
|
||||
c.lock.Lock()
|
||||
value, ok = c.lru.Get(key)
|
||||
c.lock.Unlock()
|
||||
return value, ok
|
||||
}
|
||||
|
||||
// Contains checks if a key is in the cache, without updating the
|
||||
// recent-ness or deleting it for being stale.
|
||||
func (c *Cache) Contains(key interface{}) bool {
|
||||
c.lock.RLock()
|
||||
containKey := c.lru.Contains(key)
|
||||
c.lock.RUnlock()
|
||||
return containKey
|
||||
}
|
||||
|
||||
// Peek returns the key value (or undefined if not found) without updating
|
||||
// the "recently used"-ness of the key.
|
||||
func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) {
|
||||
c.lock.RLock()
|
||||
value, ok = c.lru.Peek(key)
|
||||
c.lock.RUnlock()
|
||||
return value, ok
|
||||
}
|
||||
|
||||
// ContainsOrAdd checks if a key is in the cache without updating the
|
||||
// recent-ness or deleting it for being stale, and if not, adds the value.
|
||||
// Returns whether found and whether an eviction occurred.
|
||||
func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
if c.lru.Contains(key) {
|
||||
return true, false
|
||||
}
|
||||
evicted = c.lru.Add(key, value)
|
||||
return false, evicted
|
||||
}
|
||||
|
||||
// PeekOrAdd checks if a key is in the cache without updating the
|
||||
// recent-ness or deleting it for being stale, and if not, adds the value.
|
||||
// Returns whether found and whether an eviction occurred.
|
||||
func (c *Cache) PeekOrAdd(key, value interface{}) (previous interface{}, ok, evicted bool) {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
previous, ok = c.lru.Peek(key)
|
||||
if ok {
|
||||
return previous, true, false
|
||||
}
|
||||
|
||||
evicted = c.lru.Add(key, value)
|
||||
return nil, false, evicted
|
||||
}
|
||||
|
||||
// Remove removes the provided key from the cache.
|
||||
func (c *Cache) Remove(key interface{}) (present bool) {
|
||||
c.lock.Lock()
|
||||
present = c.lru.Remove(key)
|
||||
c.lock.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
// Resize changes the cache size.
|
||||
func (c *Cache) Resize(size int) (evicted int) {
|
||||
c.lock.Lock()
|
||||
evicted = c.lru.Resize(size)
|
||||
c.lock.Unlock()
|
||||
return evicted
|
||||
}
|
||||
|
||||
// RemoveOldest removes the oldest item from the cache.
|
||||
func (c *Cache) RemoveOldest() (key interface{}, value interface{}, ok bool) {
|
||||
c.lock.Lock()
|
||||
key, value, ok = c.lru.RemoveOldest()
|
||||
c.lock.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
// GetOldest returns the oldest entry
|
||||
func (c *Cache) GetOldest() (key interface{}, value interface{}, ok bool) {
|
||||
c.lock.Lock()
|
||||
key, value, ok = c.lru.GetOldest()
|
||||
c.lock.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
// Keys returns a slice of the keys in the cache, from oldest to newest.
|
||||
func (c *Cache) Keys() []interface{} {
|
||||
c.lock.RLock()
|
||||
keys := c.lru.Keys()
|
||||
c.lock.RUnlock()
|
||||
return keys
|
||||
}
|
||||
|
||||
// Len returns the number of items in the cache.
|
||||
func (c *Cache) Len() int {
|
||||
c.lock.RLock()
|
||||
length := c.lru.Len()
|
||||
c.lock.RUnlock()
|
||||
return length
|
||||
}
|
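`ContainsOrAdd` and `PeekOrAdd` above bundle a lookup and an insert under a single lock acquisition. A sketch of how a caller might build a get-or-compute helper on top of them; the surrounding function and its names are illustrative, not part of the vendored package:

```go
package cacheutil

import lru "github.com/hashicorp/golang-lru"

// GetOrCompute returns the cached value for key, computing and caching it on
// a miss. compute may still run more than once under concurrent misses;
// PeekOrAdd only makes the lookup+insert pair itself atomic.
func GetOrCompute(c *lru.Cache, key string, compute func() interface{}) interface{} {
	if v, ok := c.Get(key); ok {
		return v
	}
	v := compute()
	if prev, ok, _ := c.PeekOrAdd(key, v); ok {
		// Another goroutine filled the slot between our Get and PeekOrAdd.
		return prev
	}
	return v
}
```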
177
vendor/github.com/hashicorp/golang-lru/simplelru/lru.go
generated
vendored
Normal file
@ -0,0 +1,177 @@
|
||||
package simplelru
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"errors"
|
||||
)
|
||||
|
||||
// EvictCallback is used to get a callback when a cache entry is evicted
|
||||
type EvictCallback func(key interface{}, value interface{})
|
||||
|
||||
// LRU implements a non-thread safe fixed size LRU cache
|
||||
type LRU struct {
|
||||
size int
|
||||
evictList *list.List
|
||||
items map[interface{}]*list.Element
|
||||
onEvict EvictCallback
|
||||
}
|
||||
|
||||
// entry is used to hold a value in the evictList
|
||||
type entry struct {
|
||||
key interface{}
|
||||
value interface{}
|
||||
}
|
||||
|
||||
// NewLRU constructs an LRU of the given size
|
||||
func NewLRU(size int, onEvict EvictCallback) (*LRU, error) {
|
||||
if size <= 0 {
|
||||
return nil, errors.New("Must provide a positive size")
|
||||
}
|
||||
c := &LRU{
|
||||
size: size,
|
||||
evictList: list.New(),
|
||||
items: make(map[interface{}]*list.Element),
|
||||
onEvict: onEvict,
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Purge is used to completely clear the cache.
|
||||
func (c *LRU) Purge() {
|
||||
for k, v := range c.items {
|
||||
if c.onEvict != nil {
|
||||
c.onEvict(k, v.Value.(*entry).value)
|
||||
}
|
||||
delete(c.items, k)
|
||||
}
|
||||
c.evictList.Init()
|
||||
}
|
||||
|
||||
// Add adds a value to the cache. Returns true if an eviction occurred.
|
||||
func (c *LRU) Add(key, value interface{}) (evicted bool) {
|
||||
// Check for existing item
|
||||
if ent, ok := c.items[key]; ok {
|
||||
c.evictList.MoveToFront(ent)
|
||||
ent.Value.(*entry).value = value
|
||||
return false
|
||||
}
|
||||
|
||||
// Add new item
|
||||
ent := &entry{key, value}
|
||||
entry := c.evictList.PushFront(ent)
|
||||
c.items[key] = entry
|
||||
|
||||
evict := c.evictList.Len() > c.size
|
||||
// Verify size not exceeded
|
||||
if evict {
|
||||
c.removeOldest()
|
||||
}
|
||||
return evict
|
||||
}
|
||||
|
||||
// Get looks up a key's value from the cache.
|
||||
func (c *LRU) Get(key interface{}) (value interface{}, ok bool) {
|
||||
if ent, ok := c.items[key]; ok {
|
||||
c.evictList.MoveToFront(ent)
|
||||
if ent.Value.(*entry) == nil {
|
||||
return nil, false
|
||||
}
|
||||
return ent.Value.(*entry).value, true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Contains checks if a key is in the cache, without updating the recent-ness
|
||||
// or deleting it for being stale.
|
||||
func (c *LRU) Contains(key interface{}) (ok bool) {
|
||||
_, ok = c.items[key]
|
||||
return ok
|
||||
}
|
||||
|
||||
// Peek returns the key value (or undefined if not found) without updating
|
||||
// the "recently used"-ness of the key.
|
||||
func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) {
|
||||
var ent *list.Element
|
||||
if ent, ok = c.items[key]; ok {
|
||||
return ent.Value.(*entry).value, true
|
||||
}
|
||||
return nil, ok
|
||||
}
|
||||
|
||||
// Remove removes the provided key from the cache, returning if the
|
||||
// key was contained.
|
||||
func (c *LRU) Remove(key interface{}) (present bool) {
|
||||
if ent, ok := c.items[key]; ok {
|
||||
c.removeElement(ent)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// RemoveOldest removes the oldest item from the cache.
|
||||
func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) {
|
||||
ent := c.evictList.Back()
|
||||
if ent != nil {
|
||||
c.removeElement(ent)
|
||||
kv := ent.Value.(*entry)
|
||||
return kv.key, kv.value, true
|
||||
}
|
||||
return nil, nil, false
|
||||
}
|
||||
|
||||
// GetOldest returns the oldest entry
|
||||
func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) {
|
||||
ent := c.evictList.Back()
|
||||
if ent != nil {
|
||||
kv := ent.Value.(*entry)
|
||||
return kv.key, kv.value, true
|
||||
}
|
||||
return nil, nil, false
|
||||
}
|
||||
|
||||
// Keys returns a slice of the keys in the cache, from oldest to newest.
|
||||
func (c *LRU) Keys() []interface{} {
|
||||
keys := make([]interface{}, len(c.items))
|
||||
i := 0
|
||||
for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() {
|
||||
keys[i] = ent.Value.(*entry).key
|
||||
i++
|
||||
}
|
||||
return keys
|
||||
}
|
||||
|
||||
// Len returns the number of items in the cache.
|
||||
func (c *LRU) Len() int {
|
||||
return c.evictList.Len()
|
||||
}
|
||||
|
||||
// Resize changes the cache size.
|
||||
func (c *LRU) Resize(size int) (evicted int) {
|
||||
diff := c.Len() - size
|
||||
if diff < 0 {
|
||||
diff = 0
|
||||
}
|
||||
for i := 0; i < diff; i++ {
|
||||
c.removeOldest()
|
||||
}
|
||||
c.size = size
|
||||
return diff
|
||||
}
|
||||
|
||||
// removeOldest removes the oldest item from the cache.
|
||||
func (c *LRU) removeOldest() {
|
||||
ent := c.evictList.Back()
|
||||
if ent != nil {
|
||||
c.removeElement(ent)
|
||||
}
|
||||
}
|
||||
|
||||
// removeElement is used to remove a given list element from the cache
|
||||
func (c *LRU) removeElement(e *list.Element) {
|
||||
c.evictList.Remove(e)
|
||||
kv := e.Value.(*entry)
|
||||
delete(c.items, kv.key)
|
||||
if c.onEvict != nil {
|
||||
c.onEvict(kv.key, kv.value)
|
||||
}
|
||||
}
|
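`simplelru.LRU` does no locking of its own (the `Cache` and `ARCCache` wrappers earlier in this diff add that). A hedged sketch of driving it directly, with arbitrary keys and sizes:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/golang-lru/simplelru"
)

func main() {
	// simplelru.LRU is not thread-safe; the caller owns synchronization.
	l, err := simplelru.NewLRU(2, func(key, value interface{}) {
		fmt.Printf("evict %v=%v\n", key, value)
	})
	if err != nil {
		panic(err)
	}
	l.Add("a", 1)
	l.Add("b", 2)
	l.Add("c", 3)            // fires the callback for "a", the oldest entry
	fmt.Println(l.Resize(1)) // 1: shrinking evicts the now-oldest "b"
}
```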
39
vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go
generated
vendored
Normal file
@ -0,0 +1,39 @@
package simplelru

// LRUCache is the interface for simple LRU cache.
type LRUCache interface {
	// Adds a value to the cache, returns true if an eviction occurred and
	// updates the "recently used"-ness of the key.
	Add(key, value interface{}) bool

	// Returns key's value from the cache and
	// updates the "recently used"-ness of the key. #value, isFound
	Get(key interface{}) (value interface{}, ok bool)

	// Checks if a key exists in cache without updating the recent-ness.
	Contains(key interface{}) (ok bool)

	// Returns key's value without updating the "recently used"-ness of the key.
	Peek(key interface{}) (value interface{}, ok bool)

	// Removes a key from the cache.
	Remove(key interface{}) bool

	// Removes the oldest entry from cache.
	RemoveOldest() (interface{}, interface{}, bool)

	// Returns the oldest entry from the cache. #key, value, isFound
	GetOldest() (interface{}, interface{}, bool)

	// Returns a slice of the keys in the cache, from oldest to newest.
	Keys() []interface{}

	// Returns the number of items in the cache.
	Len() int

	// Clears all cache entries.
	Purge()

	// Resizes cache, returning number evicted
	Resize(int) int
}
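Judging from the method sets shown in this diff, both the non-locking `*simplelru.LRU` and the thread-safe `*lru.Cache` wrapper expose this interface. A compile-time check, offered as a sketch rather than vendored code:

```go
package main

import (
	lru "github.com/hashicorp/golang-lru"
	"github.com/hashicorp/golang-lru/simplelru"
)

// Compile-time assertions that both implementations satisfy LRUCache.
var (
	_ simplelru.LRUCache = (*simplelru.LRU)(nil)
	_ simplelru.LRUCache = (*lru.Cache)(nil)
)

func main() {}
```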
2
vendor/github.com/huin/goupnp/.gitignore
generated
vendored
Normal file
@ -0,0 +1,2 @@
*.zip
*.sublime-workspace
23
vendor/github.com/huin/goupnp/LICENSE
generated
vendored
Normal file
@ -0,0 +1,23 @@
Copyright (c) 2013, John Beisley <johnbeisleyuk@gmail.com>
All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice, this
  list of conditions and the following disclaimer in the documentation and/or
  other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48
vendor/github.com/huin/goupnp/README.md
generated
vendored
Normal file
@ -0,0 +1,48 @@
goupnp is a UPnP client library for Go

Installation
------------

Run `go get -u github.com/huin/goupnp`.

Documentation
-------------

Supported DCPs (you probably want to start with one of these):

* [av1](https://godoc.org/github.com/huin/goupnp/dcps/av1) - Client for UPnP Device Control Protocol MediaServer v1 and MediaRenderer v1.
* [internetgateway1](https://godoc.org/github.com/huin/goupnp/dcps/internetgateway1) - Client for UPnP Device Control Protocol Internet Gateway Device v1.
* [internetgateway2](https://godoc.org/github.com/huin/goupnp/dcps/internetgateway2) - Client for UPnP Device Control Protocol Internet Gateway Device v2.

Core components:

* [goupnp](https://godoc.org/github.com/huin/goupnp) core library - contains data structures and utilities typically used by the implemented DCPs.
* [httpu](https://godoc.org/github.com/huin/goupnp/httpu) HTTPU implementation, underlies SSDP.
* [ssdp](https://godoc.org/github.com/huin/goupnp/ssdp) SSDP client implementation (Simple Service Discovery Protocol) - used to discover UPnP services on a network.
* [soap](https://godoc.org/github.com/huin/goupnp/soap) SOAP client implementation (Simple Object Access Protocol) - used to communicate with discovered services.


Regenerating dcps generated source code:
----------------------------------------

1. Build the code generator:

   `go get -u github.com/huin/goupnp/cmd/goupnpdcpgen`

2. Regenerate the code:

   `go generate ./...`

Supporting additional UPnP devices and services:
------------------------------------------------

Supporting additional services is, in the trivial case, simply a matter of
adding the service to the `dcpMetadata` whitelist in `cmd/goupnpdcpgen/metadata.go`,
regenerating the source code (see above), and committing that source code.

However, it would be helpful if anyone needing such a service could test it
against the device they have, and then report any trouble encountered as an
[issue on this project](https://github.com/huin/goupnp/issues/new). If it just
works, please report at least minimal working functionality as an issue, and
optionally contribute the metadata upstream.
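As a concrete starting point, here is a hedged sketch of raw device discovery using only the core APIs reproduced later in this vendor tree (the DCP-specific clients under `dcps/` are the more usual entry point; the search URN below is just an example):

```go
package main

import (
	"fmt"
	"log"

	"github.com/huin/goupnp"
)

func main() {
	// Search for UPnP Internet Gateway Devices (v1) on the local network.
	devices, err := goupnp.DiscoverDevices("urn:schemas-upnp-org:device:InternetGatewayDevice:1")
	if err != nil {
		log.Fatal(err) // sending the SSDP search itself failed
	}
	for _, maybe := range devices {
		if maybe.Err != nil {
			// A location was discovered but probing it failed.
			fmt.Println("probe error:", maybe.Location, maybe.Err)
			continue
		}
		fmt.Printf("found %s at %s\n", maybe.Root.Device.FriendlyName, maybe.Location)
	}
}
```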
2
vendor/github.com/huin/goupnp/dcps/internetgateway1/gen.go
generated
vendored
Normal file
@ -0,0 +1,2 @@
//go:generate goupnpdcpgen -dcp_name internetgateway1
package internetgateway1
3651
vendor/github.com/huin/goupnp/dcps/internetgateway1/internetgateway1.go
generated
vendored
Normal file
File diff suppressed because it is too large
2
vendor/github.com/huin/goupnp/dcps/internetgateway2/gen.go
generated
vendored
Normal file
@ -0,0 +1,2 @@
//go:generate goupnpdcpgen -dcp_name internetgateway2
package internetgateway2
5248
vendor/github.com/huin/goupnp/dcps/internetgateway2/internetgateway2.go
generated
vendored
Normal file
File diff suppressed because it is too large
190
vendor/github.com/huin/goupnp/device.go
generated
vendored
Normal file
@ -0,0 +1,190 @@
|
||||
// This file contains XML structures for communicating with UPnP devices.
|
||||
|
||||
package goupnp
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
|
||||
"github.com/huin/goupnp/scpd"
|
||||
"github.com/huin/goupnp/soap"
|
||||
)
|
||||
|
||||
const (
|
||||
DeviceXMLNamespace = "urn:schemas-upnp-org:device-1-0"
|
||||
)
|
||||
|
||||
// RootDevice is the device description as described by section 2.3 "Device
|
||||
// description" in
|
||||
// http://upnp.org/specs/arch/UPnP-arch-DeviceArchitecture-v1.1.pdf
|
||||
type RootDevice struct {
|
||||
XMLName xml.Name `xml:"root"`
|
||||
SpecVersion SpecVersion `xml:"specVersion"`
|
||||
URLBase url.URL `xml:"-"`
|
||||
URLBaseStr string `xml:"URLBase"`
|
||||
Device Device `xml:"device"`
|
||||
}
|
||||
|
||||
// SetURLBase sets the URLBase for the RootDevice and its underlying components.
|
||||
func (root *RootDevice) SetURLBase(urlBase *url.URL) {
|
||||
root.URLBase = *urlBase
|
||||
root.URLBaseStr = urlBase.String()
|
||||
root.Device.SetURLBase(urlBase)
|
||||
}
|
||||
|
||||
// SpecVersion is part of a RootDevice, describes the version of the
|
||||
// specification that the data adheres to.
|
||||
type SpecVersion struct {
|
||||
Major int32 `xml:"major"`
|
||||
Minor int32 `xml:"minor"`
|
||||
}
|
||||
|
||||
// Device is a UPnP device. It can have child devices.
|
||||
type Device struct {
|
||||
DeviceType string `xml:"deviceType"`
|
||||
FriendlyName string `xml:"friendlyName"`
|
||||
Manufacturer string `xml:"manufacturer"`
|
||||
ManufacturerURL URLField `xml:"manufacturerURL"`
|
||||
ModelDescription string `xml:"modelDescription"`
|
||||
ModelName string `xml:"modelName"`
|
||||
ModelNumber string `xml:"modelNumber"`
|
||||
ModelURL URLField `xml:"modelURL"`
|
||||
SerialNumber string `xml:"serialNumber"`
|
||||
UDN string `xml:"UDN"`
|
||||
UPC string `xml:"UPC,omitempty"`
|
||||
Icons []Icon `xml:"iconList>icon,omitempty"`
|
||||
Services []Service `xml:"serviceList>service,omitempty"`
|
||||
Devices []Device `xml:"deviceList>device,omitempty"`
|
||||
|
||||
// Extra observed elements:
|
||||
PresentationURL URLField `xml:"presentationURL"`
|
||||
}
|
||||
|
||||
// VisitDevices calls visitor for the device, and all its descendent devices.
|
||||
func (device *Device) VisitDevices(visitor func(*Device)) {
|
||||
visitor(device)
|
||||
for i := range device.Devices {
|
||||
device.Devices[i].VisitDevices(visitor)
|
||||
}
|
||||
}
|
||||
|
||||
// VisitServices calls visitor for all Services under the device and all its
|
||||
// descendent devices.
|
||||
func (device *Device) VisitServices(visitor func(*Service)) {
|
||||
device.VisitDevices(func(d *Device) {
|
||||
for i := range d.Services {
|
||||
visitor(&d.Services[i])
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// FindService finds all (if any) Services under the device and its descendents
|
||||
// that have the given ServiceType.
|
||||
func (device *Device) FindService(serviceType string) []*Service {
|
||||
var services []*Service
|
||||
device.VisitServices(func(s *Service) {
|
||||
if s.ServiceType == serviceType {
|
||||
services = append(services, s)
|
||||
}
|
||||
})
|
||||
return services
|
||||
}
|
||||
|
||||
// SetURLBase sets the URLBase for the Device and its underlying components.
|
||||
func (device *Device) SetURLBase(urlBase *url.URL) {
|
||||
device.ManufacturerURL.SetURLBase(urlBase)
|
||||
device.ModelURL.SetURLBase(urlBase)
|
||||
device.PresentationURL.SetURLBase(urlBase)
|
||||
for i := range device.Icons {
|
||||
device.Icons[i].SetURLBase(urlBase)
|
||||
}
|
||||
for i := range device.Services {
|
||||
device.Services[i].SetURLBase(urlBase)
|
||||
}
|
||||
for i := range device.Devices {
|
||||
device.Devices[i].SetURLBase(urlBase)
|
||||
}
|
||||
}
|
||||
|
||||
func (device *Device) String() string {
|
||||
return fmt.Sprintf("Device ID %s : %s (%s)", device.UDN, device.DeviceType, device.FriendlyName)
|
||||
}
|
||||
|
||||
// Icon is a representative image that a device might include in its
|
||||
// description.
|
||||
type Icon struct {
|
||||
Mimetype string `xml:"mimetype"`
|
||||
Width int32 `xml:"width"`
|
||||
Height int32 `xml:"height"`
|
||||
Depth int32 `xml:"depth"`
|
||||
URL URLField `xml:"url"`
|
||||
}
|
||||
|
||||
// SetURLBase sets the URLBase for the Icon.
|
||||
func (icon *Icon) SetURLBase(url *url.URL) {
|
||||
icon.URL.SetURLBase(url)
|
||||
}
|
||||
|
||||
// Service is a service provided by a UPnP Device.
|
||||
type Service struct {
|
||||
ServiceType string `xml:"serviceType"`
|
||||
ServiceId string `xml:"serviceId"`
|
||||
SCPDURL URLField `xml:"SCPDURL"`
|
||||
ControlURL URLField `xml:"controlURL"`
|
||||
EventSubURL URLField `xml:"eventSubURL"`
|
||||
}
|
||||
|
||||
// SetURLBase sets the URLBase for the Service.
|
||||
func (srv *Service) SetURLBase(urlBase *url.URL) {
|
||||
srv.SCPDURL.SetURLBase(urlBase)
|
||||
srv.ControlURL.SetURLBase(urlBase)
|
||||
srv.EventSubURL.SetURLBase(urlBase)
|
||||
}
|
||||
|
||||
func (srv *Service) String() string {
|
||||
return fmt.Sprintf("Service ID %s : %s", srv.ServiceId, srv.ServiceType)
|
||||
}
|
||||
|
||||
// RequestSCPD requests the SCPD (soap actions and state variables description)
|
||||
// for the service.
|
||||
func (srv *Service) RequestSCPD() (*scpd.SCPD, error) {
|
||||
if !srv.SCPDURL.Ok {
|
||||
return nil, errors.New("bad/missing SCPD URL, or no URLBase has been set")
|
||||
}
|
||||
s := new(scpd.SCPD)
|
||||
if err := requestXml(srv.SCPDURL.URL.String(), scpd.SCPDXMLNamespace, s); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// RequestSCDP is for compatibility only, prefer RequestSCPD. This was a
|
||||
// misspelling of RequestSCDP.
|
||||
func (srv *Service) RequestSCDP() (*scpd.SCPD, error) {
|
||||
return srv.RequestSCPD()
|
||||
}
|
||||
|
||||
func (srv *Service) NewSOAPClient() *soap.SOAPClient {
|
||||
return soap.NewSOAPClient(srv.ControlURL.URL)
|
||||
}
|
||||
|
||||
// URLField is a URL that is part of a device description.
|
||||
type URLField struct {
|
||||
URL url.URL `xml:"-"`
|
||||
Ok bool `xml:"-"`
|
||||
Str string `xml:",chardata"`
|
||||
}
|
||||
|
||||
func (uf *URLField) SetURLBase(urlBase *url.URL) {
|
||||
refUrl, err := url.Parse(uf.Str)
|
||||
if err != nil {
|
||||
uf.URL = url.URL{}
|
||||
uf.Ok = false
|
||||
return
|
||||
}
|
||||
|
||||
uf.URL = *urlBase.ResolveReference(refUrl)
|
||||
uf.Ok = true
|
||||
}
|
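A sketch that ties the Device/Service types above together: load a root device by URL, pick out services by type, and pull their SCPD. The description URL and the service URN are hypothetical examples, not values from the vendored code:

```go
package main

import (
	"fmt"
	"log"
	"net/url"

	"github.com/huin/goupnp"
)

func main() {
	// Hypothetical description URL of a previously discovered device.
	loc, err := url.Parse("http://192.0.2.1:49152/rootDesc.xml")
	if err != nil {
		log.Fatal(err)
	}
	root, err := goupnp.DeviceByURL(loc)
	if err != nil {
		log.Fatal(err)
	}
	// FindService walks the whole device tree for matching service types.
	for _, srv := range root.Device.FindService("urn:schemas-upnp-org:service:WANIPConnection:1") {
		scpd, err := srv.RequestSCPD()
		if err != nil {
			log.Printf("no SCPD for %s: %v", srv, err)
			continue
		}
		fmt.Printf("%s exposes %d actions\n", srv, len(scpd.Actions))
	}
}
```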
7
vendor/github.com/huin/goupnp/go.mod
generated
vendored
Normal file
@ -0,0 +1,7 @@
module github.com/huin/goupnp

require (
	github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150
	golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1
	golang.org/x/text v0.3.0 // indirect
)
6
vendor/github.com/huin/goupnp/go.sum
generated
vendored
Normal file
@ -0,0 +1,6 @@
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150 h1:vlNjIqmUZ9CMAWsbURYl3a6wZbw7q5RHVvlXTNS/Bs8=
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1 h1:Y/KGZSOdz/2r0WJ9Mkmz6NJBusp0kiNx1Cn82lzJQ6w=
golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
131
vendor/github.com/huin/goupnp/goupnp.go
generated
vendored
Normal file
@ -0,0 +1,131 @@
|
||||
// goupnp is an implementation of a client for various UPnP services.
|
||||
//
|
||||
// For most uses, it is recommended to use the code-generated packages under
|
||||
// github.com/huin/goupnp/dcps. Example use is shown at
|
||||
// http://godoc.org/github.com/huin/goupnp/example
|
||||
//
|
||||
// A commonly used client is internetgateway1.WANPPPConnection1:
|
||||
// http://godoc.org/github.com/huin/goupnp/dcps/internetgateway1#WANPPPConnection1
|
||||
//
|
||||
// Currently only a couple of schemas have code generated for them from the
|
||||
// UPnP example XML specifications. Not all methods will work on these clients,
|
||||
// because the generated stubs contain the full set of specified methods from
|
||||
// the XML specifications, and the discovered services will likely support a
|
||||
// subset of those methods.
|
||||
package goupnp
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/html/charset"
|
||||
|
||||
"github.com/huin/goupnp/httpu"
|
||||
"github.com/huin/goupnp/ssdp"
|
||||
)
|
||||
|
||||
// ContextError is an error that wraps an error with some context information.
|
||||
type ContextError struct {
|
||||
Context string
|
||||
Err error
|
||||
}
|
||||
|
||||
func (err ContextError) Error() string {
|
||||
return fmt.Sprintf("%s: %v", err.Context, err.Err)
|
||||
}
|
||||
|
||||
// MaybeRootDevice contains either a RootDevice or an error.
|
||||
type MaybeRootDevice struct {
|
||||
// Set iff Err == nil.
|
||||
Root *RootDevice
|
||||
|
||||
// The location the device was discovered at. This can be used with
|
||||
// DeviceByURL, assuming the device is still present. A location represents
|
||||
// the discovery of a device, regardless of if there was an error probing it.
|
||||
Location *url.URL
|
||||
|
||||
// Any error encountered probing a discovered device.
|
||||
Err error
|
||||
}
|
||||
|
||||
// DiscoverDevices attempts to find targets of the given type. This is
|
||||
// typically the entry-point for this package. searchTarget is typically a URN
|
||||
// in the form "urn:schemas-upnp-org:device:..." or
|
||||
// "urn:schemas-upnp-org:service:...". A single error is returned for errors
|
||||
// while attempting to send the query. An error or RootDevice is returned for
|
||||
// each discovered RootDevice.
|
||||
func DiscoverDevices(searchTarget string) ([]MaybeRootDevice, error) {
|
||||
httpu, err := httpu.NewHTTPUClient()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer httpu.Close()
|
||||
responses, err := ssdp.SSDPRawSearch(httpu, string(searchTarget), 2, 3)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
results := make([]MaybeRootDevice, len(responses))
|
||||
for i, response := range responses {
|
||||
maybe := &results[i]
|
||||
loc, err := response.Location()
|
||||
if err != nil {
|
||||
maybe.Err = ContextError{"unexpected bad location from search", err}
|
||||
continue
|
||||
}
|
||||
maybe.Location = loc
|
||||
if root, err := DeviceByURL(loc); err != nil {
|
||||
maybe.Err = err
|
||||
} else {
|
||||
maybe.Root = root
|
||||
}
|
||||
}
|
||||
|
||||
return results, nil
|
||||
}
|
||||
|
||||
func DeviceByURL(loc *url.URL) (*RootDevice, error) {
|
||||
locStr := loc.String()
|
||||
root := new(RootDevice)
|
||||
if err := requestXml(locStr, DeviceXMLNamespace, root); err != nil {
|
||||
return nil, ContextError{fmt.Sprintf("error requesting root device details from %q", locStr), err}
|
||||
}
|
||||
var urlBaseStr string
|
||||
if root.URLBaseStr != "" {
|
||||
urlBaseStr = root.URLBaseStr
|
||||
} else {
|
||||
urlBaseStr = locStr
|
||||
}
|
||||
urlBase, err := url.Parse(urlBaseStr)
|
||||
if err != nil {
|
||||
return nil, ContextError{fmt.Sprintf("error parsing location URL %q", locStr), err}
|
||||
}
|
||||
root.SetURLBase(urlBase)
|
||||
return root, nil
|
||||
}
|
||||
|
||||
func requestXml(url string, defaultSpace string, doc interface{}) error {
|
||||
timeout := time.Duration(3 * time.Second)
|
||||
client := http.Client{
|
||||
Timeout: timeout,
|
||||
}
|
||||
resp, err := client.Get(url)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != 200 {
|
||||
return fmt.Errorf("goupnp: got response status %s from %q",
|
||||
resp.Status, url)
|
||||
}
|
||||
|
||||
decoder := xml.NewDecoder(resp.Body)
|
||||
decoder.DefaultSpace = defaultSpace
|
||||
decoder.CharsetReader = charset.NewReaderLabel
|
||||
|
||||
return decoder.Decode(doc)
|
||||
}
|
8
vendor/github.com/huin/goupnp/goupnp.sublime-project
generated
vendored
Normal file
@ -0,0 +1,8 @@
{
	"folders":
	[
		{
			"path": "."
		}
	]
}
134
vendor/github.com/huin/goupnp/httpu/httpu.go
generated
vendored
Normal file
@ -0,0 +1,134 @@
|
||||
package httpu
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// HTTPUClient is a client for dealing with HTTPU (HTTP over UDP). Its typical
|
||||
// function is for HTTPMU, and particularly SSDP.
|
||||
type HTTPUClient struct {
|
||||
connLock sync.Mutex // Protects use of conn.
|
||||
conn net.PacketConn
|
||||
}
|
||||
|
||||
// NewHTTPUClient creates a new HTTPUClient, opening up a new UDP socket for the
|
||||
// purpose.
|
||||
func NewHTTPUClient() (*HTTPUClient, error) {
|
||||
conn, err := net.ListenPacket("udp", ":0")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &HTTPUClient{conn: conn}, nil
|
||||
}
|
||||
|
||||
// NewHTTPUClientAddr creates a new HTTPUClient which will broadcast packets
|
||||
// from the specified address, opening up a new UDP socket for the purpose
|
||||
func NewHTTPUClientAddr(addr string) (*HTTPUClient, error) {
|
||||
ip := net.ParseIP(addr)
|
||||
if ip == nil {
|
||||
return nil, errors.New("Invalid listening address")
|
||||
}
|
||||
conn, err := net.ListenPacket("udp", ip.String()+":0")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &HTTPUClient{conn: conn}, nil
|
||||
}
|
||||
|
||||
// Close shuts down the client. The client will no longer be useful following
|
||||
// this.
|
||||
func (httpu *HTTPUClient) Close() error {
|
||||
httpu.connLock.Lock()
|
||||
defer httpu.connLock.Unlock()
|
||||
return httpu.conn.Close()
|
||||
}
|
||||
|
||||
// Do performs a request. The timeout is how long to wait for before returning
|
||||
// the responses that were received. An error is only returned for failing to
|
||||
// send the request. Failures in receipt simply do not add to the resulting
|
||||
// responses.
|
||||
//
|
||||
// Note that at present only one concurrent connection will happen per
|
||||
// HTTPUClient.
|
||||
func (httpu *HTTPUClient) Do(req *http.Request, timeout time.Duration, numSends int) ([]*http.Response, error) {
|
||||
httpu.connLock.Lock()
|
||||
defer httpu.connLock.Unlock()
|
||||
|
||||
// Create the request. This is a subset of what http.Request.Write does
|
||||
// deliberately to avoid creating extra fields which may confuse some
|
||||
// devices.
|
||||
var requestBuf bytes.Buffer
|
||||
method := req.Method
|
||||
if method == "" {
|
||||
method = "GET"
|
||||
}
|
||||
if _, err := fmt.Fprintf(&requestBuf, "%s %s HTTP/1.1\r\n", method, req.URL.RequestURI()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := req.Header.Write(&requestBuf); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if _, err := requestBuf.Write([]byte{'\r', '\n'}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
destAddr, err := net.ResolveUDPAddr("udp", req.Host)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = httpu.conn.SetDeadline(time.Now().Add(timeout)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Send request.
|
||||
for i := 0; i < numSends; i++ {
|
||||
if n, err := httpu.conn.WriteTo(requestBuf.Bytes(), destAddr); err != nil {
|
||||
return nil, err
|
||||
} else if n < len(requestBuf.Bytes()) {
|
||||
return nil, fmt.Errorf("httpu: wrote %d bytes rather than full %d in request",
|
||||
n, len(requestBuf.Bytes()))
|
||||
}
|
||||
time.Sleep(5 * time.Millisecond)
|
||||
}
|
||||
|
||||
// Await responses until timeout.
|
||||
var responses []*http.Response
|
||||
responseBytes := make([]byte, 2048)
|
||||
for {
|
||||
// 2048 bytes should be sufficient for most networks.
|
||||
n, _, err := httpu.conn.ReadFrom(responseBytes)
|
||||
if err != nil {
|
||||
if err, ok := err.(net.Error); ok {
|
||||
if err.Timeout() {
|
||||
break
|
||||
}
|
||||
if err.Temporary() {
|
||||
// Sleep in case this is a persistent error to avoid pegging CPU until deadline.
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
continue
|
||||
}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Parse response.
|
||||
response, err := http.ReadResponse(bufio.NewReader(bytes.NewBuffer(responseBytes[:n])), req)
|
||||
if err != nil {
|
||||
log.Printf("httpu: error while parsing response: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
responses = append(responses, response)
|
||||
}
|
||||
|
||||
// Timeout reached - return discovered responses.
|
||||
return responses, nil
|
||||
}
|
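`HTTPUClient.Do` above only needs an ordinary `*http.Request`. A hedged sketch of issuing an SSDP M-SEARCH with it; the multicast address and header values follow the usual SSDP conventions and are assumptions, not values taken from this package:

```go
package main

import (
	"fmt"
	"log"
	"net/http"
	"net/url"
	"time"

	"github.com/huin/goupnp/httpu"
)

func main() {
	client, err := httpu.NewHTTPUClient()
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// M-SEARCH goes to the SSDP multicast group; req.Host becomes the UDP
	// destination inside Do, and the request line reads "M-SEARCH * HTTP/1.1".
	req := &http.Request{
		Method: "M-SEARCH",
		URL:    &url.URL{Opaque: "*"},
		Host:   "239.255.255.250:1900",
		Header: http.Header{
			"HOST": []string{"239.255.255.250:1900"},
			"MAN":  []string{`"ssdp:discover"`},
			"MX":   []string{"2"},
			"ST":   []string{"ssdp:all"},
		},
	}
	responses, err := client.Do(req, 2*time.Second, 3)
	if err != nil {
		log.Fatal(err)
	}
	for _, resp := range responses {
		fmt.Println(resp.Header.Get("LOCATION"))
	}
}
```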
108
vendor/github.com/huin/goupnp/httpu/serve.go
generated
vendored
Normal file
@ -0,0 +1,108 @@
|
||||
package httpu
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
const (
|
||||
DefaultMaxMessageBytes = 2048
|
||||
)
|
||||
|
||||
var (
|
||||
trailingWhitespaceRx = regexp.MustCompile(" +\r\n")
|
||||
crlf = []byte("\r\n")
|
||||
)
|
||||
|
||||
// Handler is the interface by which received HTTPU messages are passed to
|
||||
// handling code.
|
||||
type Handler interface {
|
||||
// ServeMessage is called for each HTTPU message received. peerAddr contains
|
||||
// the address that the message was received from.
|
||||
ServeMessage(r *http.Request)
|
||||
}
|
||||
|
||||
// HandlerFunc is a function-to-Handler adapter.
|
||||
type HandlerFunc func(r *http.Request)
|
||||
|
||||
func (f HandlerFunc) ServeMessage(r *http.Request) {
|
||||
f(r)
|
||||
}
|
||||
|
||||
// A Server defines parameters for running an HTTPU server.
|
||||
type Server struct {
|
||||
Addr string // UDP address to listen on
|
||||
Multicast bool // Should listen for multicast?
|
||||
Interface *net.Interface // Network interface to listen on for multicast, nil for default multicast interface
|
||||
Handler Handler // handler to invoke
|
||||
MaxMessageBytes int // maximum number of bytes to read from a packet, DefaultMaxMessageBytes if 0
|
||||
}
|
||||
|
||||
// ListenAndServe listens on the UDP network address srv.Addr. If srv.Multicast
|
||||
// is true, then a multicast UDP listener will be used on srv.Interface (or
|
||||
// default interface if nil).
|
||||
func (srv *Server) ListenAndServe() error {
|
||||
var err error
|
||||
|
||||
var addr *net.UDPAddr
|
||||
if addr, err = net.ResolveUDPAddr("udp", srv.Addr); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
var conn net.PacketConn
|
||||
if srv.Multicast {
|
||||
if conn, err = net.ListenMulticastUDP("udp", srv.Interface, addr); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if conn, err = net.ListenUDP("udp", addr); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return srv.Serve(conn)
|
||||
}
|
||||
|
||||
// Serve messages received on the given packet listener to the srv.Handler.
|
||||
func (srv *Server) Serve(l net.PacketConn) error {
|
||||
maxMessageBytes := DefaultMaxMessageBytes
|
||||
if srv.MaxMessageBytes != 0 {
|
||||
maxMessageBytes = srv.MaxMessageBytes
|
||||
}
|
||||
for {
|
||||
buf := make([]byte, maxMessageBytes)
|
||||
n, peerAddr, err := l.ReadFrom(buf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
buf = buf[:n]
|
||||
|
||||
go func(buf []byte, peerAddr net.Addr) {
|
||||
// At least one router's UPnP implementation has added a trailing space
|
||||
// after "HTTP/1.1" - trim it.
|
||||
buf = trailingWhitespaceRx.ReplaceAllLiteral(buf, crlf)
|
||||
|
||||
req, err := http.ReadRequest(bufio.NewReader(bytes.NewBuffer(buf)))
|
||||
if err != nil {
|
||||
log.Printf("httpu: Failed to parse request: %v", err)
|
||||
return
|
||||
}
|
||||
req.RemoteAddr = peerAddr.String()
|
||||
srv.Handler.ServeMessage(req)
|
||||
// No need to call req.Body.Close - underlying reader is bytes.Buffer.
|
||||
}(buf, peerAddr)
|
||||
}
|
||||
}
|
||||
|
||||
// Serve messages received on the given packet listener to the given handler.
|
||||
func Serve(l net.PacketConn, handler Handler) error {
|
||||
srv := Server{
|
||||
Handler: handler,
|
||||
MaxMessageBytes: DefaultMaxMessageBytes,
|
||||
}
|
||||
return srv.Serve(l)
|
||||
}
|
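On the server side, a sketch of listening for multicast SSDP traffic with the `Server` type above; the multicast address is the standard SSDP group, not something defined in this file, and the handler body is illustrative:

```go
package main

import (
	"log"
	"net/http"

	"github.com/huin/goupnp/httpu"
)

func main() {
	srv := httpu.Server{
		Addr:      "239.255.255.250:1900", // standard SSDP multicast group
		Multicast: true,
		Handler: httpu.HandlerFunc(func(r *http.Request) {
			// NOTIFY and M-SEARCH messages from peers land here.
			log.Printf("%s %s from %s", r.Method, r.Header.Get("NT"), r.RemoteAddr)
		}),
	}
	log.Fatal(srv.ListenAndServe())
}
```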
167
vendor/github.com/huin/goupnp/scpd/scpd.go
generated
vendored
Normal file
@ -0,0 +1,167 @@
|
||||
package scpd
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
SCPDXMLNamespace = "urn:schemas-upnp-org:service-1-0"
|
||||
)
|
||||
|
||||
func cleanWhitespace(s *string) {
|
||||
*s = strings.TrimSpace(*s)
|
||||
}
|
||||
|
||||
// SCPD is the service description as described by section 2.5 "Service
|
||||
// description" in
|
||||
// http://upnp.org/specs/arch/UPnP-arch-DeviceArchitecture-v1.1.pdf
|
||||
type SCPD struct {
|
||||
XMLName xml.Name `xml:"scpd"`
|
||||
ConfigId string `xml:"configId,attr"`
|
||||
SpecVersion SpecVersion `xml:"specVersion"`
|
||||
Actions []Action `xml:"actionList>action"`
|
||||
StateVariables []StateVariable `xml:"serviceStateTable>stateVariable"`
|
||||
}
|
||||
|
||||
// Clean attempts to remove stray whitespace etc. in the structure. It seems
|
||||
// unfortunately common for stray whitespace to be present in SCPD documents,
|
||||
// this method attempts to make it easy to clean them out.
|
||||
func (scpd *SCPD) Clean() {
|
||||
cleanWhitespace(&scpd.ConfigId)
|
||||
for i := range scpd.Actions {
|
||||
scpd.Actions[i].clean()
|
||||
}
|
||||
for i := range scpd.StateVariables {
|
||||
scpd.StateVariables[i].clean()
|
||||
}
|
||||
}
|
||||
|
||||
func (scpd *SCPD) GetStateVariable(variable string) *StateVariable {
|
||||
for i := range scpd.StateVariables {
|
||||
v := &scpd.StateVariables[i]
|
||||
if v.Name == variable {
|
||||
return v
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (scpd *SCPD) GetAction(action string) *Action {
|
||||
for i := range scpd.Actions {
|
||||
a := &scpd.Actions[i]
|
||||
if a.Name == action {
|
||||
return a
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SpecVersion is part of a SCPD document, describes the version of the
|
||||
// specification that the data adheres to.
|
||||
type SpecVersion struct {
|
||||
Major int32 `xml:"major"`
|
||||
Minor int32 `xml:"minor"`
|
||||
}
|
||||
|
||||
type Action struct {
|
||||
Name string `xml:"name"`
|
||||
Arguments []Argument `xml:"argumentList>argument"`
|
||||
}
|
||||
|
||||
func (action *Action) clean() {
|
||||
cleanWhitespace(&action.Name)
|
||||
for i := range action.Arguments {
|
||||
action.Arguments[i].clean()
|
||||
}
|
||||
}
|
||||
|
||||
func (action *Action) InputArguments() []*Argument {
|
||||
var result []*Argument
|
||||
for i := range action.Arguments {
|
||||
arg := &action.Arguments[i]
|
||||
if arg.IsInput() {
|
||||
result = append(result, arg)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func (action *Action) OutputArguments() []*Argument {
|
||||
var result []*Argument
|
||||
for i := range action.Arguments {
|
||||
arg := &action.Arguments[i]
|
||||
if arg.IsOutput() {
|
||||
result = append(result, arg)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
type Argument struct {
|
||||
Name string `xml:"name"`
|
||||
Direction string `xml:"direction"` // in|out
|
||||
RelatedStateVariable string `xml:"relatedStateVariable"` // ?
|
||||
Retval string `xml:"retval"` // ?
|
||||
}
|
||||
|
||||
func (arg *Argument) clean() {
|
||||
cleanWhitespace(&arg.Name)
|
||||
cleanWhitespace(&arg.Direction)
|
||||
cleanWhitespace(&arg.RelatedStateVariable)
|
||||
cleanWhitespace(&arg.Retval)
|
||||
}
|
||||
|
||||
func (arg *Argument) IsInput() bool {
|
||||
return arg.Direction == "in"
|
||||
}
|
||||
|
||||
func (arg *Argument) IsOutput() bool {
|
||||
return arg.Direction == "out"
|
||||
}
|
||||
|
||||
type StateVariable struct {
|
||||
Name string `xml:"name"`
|
||||
SendEvents string `xml:"sendEvents,attr"` // yes|no
|
||||
Multicast string `xml:"multicast,attr"` // yes|no
|
||||
DataType DataType `xml:"dataType"`
|
||||
DefaultValue string `xml:"defaultValue"`
|
||||
AllowedValueRange *AllowedValueRange `xml:"allowedValueRange"`
|
||||
AllowedValues []string `xml:"allowedValueList>allowedValue"`
|
||||
}
|
||||
|
||||
func (v *StateVariable) clean() {
|
||||
cleanWhitespace(&v.Name)
|
||||
cleanWhitespace(&v.SendEvents)
|
||||
cleanWhitespace(&v.Multicast)
|
||||
v.DataType.clean()
|
||||
cleanWhitespace(&v.DefaultValue)
|
||||
if v.AllowedValueRange != nil {
|
||||
v.AllowedValueRange.clean()
|
||||
}
|
||||
for i := range v.AllowedValues {
|
||||
cleanWhitespace(&v.AllowedValues[i])
|
||||
}
|
||||
}
|
||||
|
||||
type AllowedValueRange struct {
|
||||
Minimum string `xml:"minimum"`
|
||||
Maximum string `xml:"maximum"`
|
||||
Step string `xml:"step"`
|
||||
}
|
||||
|
||||
func (r *AllowedValueRange) clean() {
|
||||
cleanWhitespace(&r.Minimum)
|
||||
cleanWhitespace(&r.Maximum)
|
||||
cleanWhitespace(&r.Step)
|
||||
}
|
||||
|
||||
type DataType struct {
|
||||
Name string `xml:",chardata"`
|
||||
Type string `xml:"type,attr"`
|
||||
}
|
||||
|
||||
func (dt *DataType) clean() {
|
||||
cleanWhitespace(&dt.Name)
|
||||
cleanWhitespace(&dt.Type)
|
||||
}
|
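Given a parsed SCPD (for example from `Service.RequestSCPD` earlier in this diff), listing an action's arguments is a matter of the helpers above. A small sketch; the action name is purely illustrative and the empty document only keeps the example self-contained:

```go
package main

import (
	"fmt"

	"github.com/huin/goupnp/scpd"
)

// describeAction prints the in/out arguments of one SCPD action, if present.
func describeAction(doc *scpd.SCPD, name string) {
	doc.Clean() // strip the stray whitespace many devices emit
	action := doc.GetAction(name)
	if action == nil {
		fmt.Println("no such action:", name)
		return
	}
	for _, arg := range action.InputArguments() {
		fmt.Printf("in  %s (state var %s)\n", arg.Name, arg.RelatedStateVariable)
	}
	for _, arg := range action.OutputArguments() {
		fmt.Printf("out %s (state var %s)\n", arg.Name, arg.RelatedStateVariable)
	}
}

func main() {
	// In real use the SCPD would come from Service.RequestSCPD.
	describeAction(&scpd.SCPD{}, "GetExternalIPAddress")
}
```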
88
vendor/github.com/huin/goupnp/service_client.go
generated
vendored
Normal file
@ -0,0 +1,88 @@
package goupnp

import (
    "fmt"
    "net/url"

    "github.com/huin/goupnp/soap"
)

// ServiceClient is a SOAP client, root device and the service for the SOAP
// client rolled into one value. The root device, location, and service are
// intended to be informational. Location can be used to later recreate a
// ServiceClient with NewServiceClientsByURL if the service is still present,
// bypassing the discovery process.
type ServiceClient struct {
    SOAPClient *soap.SOAPClient
    RootDevice *RootDevice
    Location   *url.URL
    Service    *Service
}

// NewServiceClients discovers services, and returns clients for them. err will
// report any error with the discovery process (blocking any device/service
// discovery); errors reports errors on a per-root-device basis.
func NewServiceClients(searchTarget string) (clients []ServiceClient, errors []error, err error) {
    var maybeRootDevices []MaybeRootDevice
    if maybeRootDevices, err = DiscoverDevices(searchTarget); err != nil {
        return
    }

    clients = make([]ServiceClient, 0, len(maybeRootDevices))

    for _, maybeRootDevice := range maybeRootDevices {
        if maybeRootDevice.Err != nil {
            errors = append(errors, maybeRootDevice.Err)
            continue
        }

        deviceClients, err := NewServiceClientsFromRootDevice(maybeRootDevice.Root, maybeRootDevice.Location, searchTarget)
        if err != nil {
            errors = append(errors, err)
            continue
        }
        clients = append(clients, deviceClients...)
    }

    return
}

// NewServiceClientsByURL creates client(s) for the given service URN, for a
// root device at the given URL.
func NewServiceClientsByURL(loc *url.URL, searchTarget string) ([]ServiceClient, error) {
    rootDevice, err := DeviceByURL(loc)
    if err != nil {
        return nil, err
    }
    return NewServiceClientsFromRootDevice(rootDevice, loc, searchTarget)
}

// NewServiceClientsFromRootDevice creates client(s) for the given service URN,
// in a given root device. The loc parameter is simply assigned to the
// Location attribute of the returned ServiceClient(s).
func NewServiceClientsFromRootDevice(rootDevice *RootDevice, loc *url.URL, searchTarget string) ([]ServiceClient, error) {
    device := &rootDevice.Device
    srvs := device.FindService(searchTarget)
    if len(srvs) == 0 {
        return nil, fmt.Errorf("goupnp: service %q not found within device %q (UDN=%q)",
            searchTarget, device.FriendlyName, device.UDN)
    }

    clients := make([]ServiceClient, 0, len(srvs))
    for _, srv := range srvs {
        clients = append(clients, ServiceClient{
            SOAPClient: srv.NewSOAPClient(),
            RootDevice: rootDevice,
            Location:   loc,
            Service:    srv,
        })
    }
    return clients, nil
}

// GetServiceClient returns the ServiceClient itself. This is provided so that
// the service client attributes can be accessed via an interface method on a
// wrapping type.
func (client *ServiceClient) GetServiceClient() *ServiceClient {
    return client
}
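Usage sketch (not part of the diff): discovering clients for a service type with NewServiceClients. The WANIPConnection URN is only an example search target; an error from a single root device lands in the errs slice rather than aborting the whole discovery.

package main

import (
    "fmt"
    "log"

    "github.com/huin/goupnp"
)

func main() {
    // Example search target; any service URN can be used here.
    const target = "urn:schemas-upnp-org:service:WANIPConnection:1"

    clients, errs, err := goupnp.NewServiceClients(target)
    if err != nil {
        log.Fatalf("discovery failed: %v", err) // discovery itself broke
    }
    for _, e := range errs {
        log.Printf("per-device error: %v", e) // one root device misbehaved
    }
    for _, c := range clients {
        fmt.Printf("found %s at %s (device %q)\n",
            target, c.Location, c.RootDevice.Device.FriendlyName)
    }
}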
193
vendor/github.com/huin/goupnp/soap/soap.go
generated
vendored
Normal file
@ -0,0 +1,193 @@
// Definition for the SOAP structure required for UPnP's SOAP usage.

package soap

import (
    "bytes"
    "encoding/xml"
    "fmt"
    "io/ioutil"
    "net/http"
    "net/url"
    "reflect"
    "regexp"
)

const (
    soapEncodingStyle = "http://schemas.xmlsoap.org/soap/encoding/"
    soapPrefix        = xml.Header + `<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/"><s:Body>`
    soapSuffix        = `</s:Body></s:Envelope>`
)

type SOAPClient struct {
    EndpointURL url.URL
    HTTPClient  http.Client
}

func NewSOAPClient(endpointURL url.URL) *SOAPClient {
    return &SOAPClient{
        EndpointURL: endpointURL,
    }
}

// PerformAction makes a SOAP request, with the given action.
// inAction and outAction must both be pointers to structs with string fields
// only.
func (client *SOAPClient) PerformAction(actionNamespace, actionName string, inAction interface{}, outAction interface{}) error {
    requestBytes, err := encodeRequestAction(actionNamespace, actionName, inAction)
    if err != nil {
        return err
    }

    response, err := client.HTTPClient.Do(&http.Request{
        Method: "POST",
        URL:    &client.EndpointURL,
        Header: http.Header{
            "SOAPACTION":   []string{`"` + actionNamespace + "#" + actionName + `"`},
            "CONTENT-TYPE": []string{"text/xml; charset=\"utf-8\""},
        },
        Body: ioutil.NopCloser(bytes.NewBuffer(requestBytes)),
        // Set ContentLength to avoid chunked encoding - some servers might not support it.
        ContentLength: int64(len(requestBytes)),
    })
    if err != nil {
        return fmt.Errorf("goupnp: error performing SOAP HTTP request: %v", err)
    }
    defer response.Body.Close()
    if response.StatusCode != 200 {
        return fmt.Errorf("goupnp: SOAP request got HTTP %s", response.Status)
    }

    responseEnv := newSOAPEnvelope()
    decoder := xml.NewDecoder(response.Body)
    if err := decoder.Decode(responseEnv); err != nil {
        return fmt.Errorf("goupnp: error decoding response body: %v", err)
    }

    if responseEnv.Body.Fault != nil {
        return responseEnv.Body.Fault
    }

    if outAction != nil {
        if err := xml.Unmarshal(responseEnv.Body.RawAction, outAction); err != nil {
            return fmt.Errorf("goupnp: error unmarshalling out action: %v, %v", err, responseEnv.Body.RawAction)
        }
    }

    return nil
}

// newSOAPEnvelope creates a soapEnvelope with the default encoding style set.
func newSOAPEnvelope() *soapEnvelope {
    return &soapEnvelope{
        EncodingStyle: soapEncodingStyle,
    }
}

// encodeRequestAction is a hacky way to create an encoded SOAP envelope
// containing the given action. Experiments with one router have shown that it
// returns HTTP 500 for requests where the outer default xmlns is set to the
// SOAP namespace and the default namespace is then reassigned to the service
// namespace within that element. The outer XML is hand-coded to work around
// this.
func encodeRequestAction(actionNamespace, actionName string, inAction interface{}) ([]byte, error) {
    requestBuf := new(bytes.Buffer)
    requestBuf.WriteString(soapPrefix)
    requestBuf.WriteString(`<u:`)
    xml.EscapeText(requestBuf, []byte(actionName))
    requestBuf.WriteString(` xmlns:u="`)
    xml.EscapeText(requestBuf, []byte(actionNamespace))
    requestBuf.WriteString(`">`)
    if inAction != nil {
        if err := encodeRequestArgs(requestBuf, inAction); err != nil {
            return nil, err
        }
    }
    requestBuf.WriteString(`</u:`)
    xml.EscapeText(requestBuf, []byte(actionName))
    requestBuf.WriteString(`>`)
    requestBuf.WriteString(soapSuffix)
    return requestBuf.Bytes(), nil
}

func encodeRequestArgs(w *bytes.Buffer, inAction interface{}) error {
    in := reflect.Indirect(reflect.ValueOf(inAction))
    if in.Kind() != reflect.Struct {
        return fmt.Errorf("goupnp: SOAP inAction is not a struct but of type %v", in.Type())
    }
    enc := xml.NewEncoder(w)
    nFields := in.NumField()
    inType := in.Type()
    for i := 0; i < nFields; i++ {
        field := inType.Field(i)
        argName := field.Name
        if nameOverride := field.Tag.Get("soap"); nameOverride != "" {
            argName = nameOverride
        }
        value := in.Field(i)
        if value.Kind() != reflect.String {
            return fmt.Errorf("goupnp: SOAP arg %q is not of type string, but of type %v", argName, value.Type())
        }
        elem := xml.StartElement{xml.Name{"", argName}, nil}
        if err := enc.EncodeToken(elem); err != nil {
            return fmt.Errorf("goupnp: error encoding start element for SOAP arg %q: %v", argName, err)
        }
        if err := enc.Flush(); err != nil {
            return fmt.Errorf("goupnp: error flushing start element for SOAP arg %q: %v", argName, err)
        }
        if _, err := w.Write([]byte(escapeXMLText(value.Interface().(string)))); err != nil {
            return fmt.Errorf("goupnp: error writing value for SOAP arg %q: %v", argName, err)
        }
        if err := enc.EncodeToken(elem.End()); err != nil {
            return fmt.Errorf("goupnp: error encoding end element for SOAP arg %q: %v", argName, err)
        }
    }
    enc.Flush()
    return nil
}

var xmlCharRx = regexp.MustCompile("[<>&]")

// escapeXMLText is used by generated code to escape text in XML, but only
// escaping the characters `<`, `>`, and `&`.
//
// This is provided in order to work around SOAP server implementations that
// fail to decode XML correctly, specifically failing to decode `&quot;`,
// `&apos;`. Note that this can only be safely used for injecting into XML
// text, but not into attributes or other contexts.
func escapeXMLText(s string) string {
    return xmlCharRx.ReplaceAllStringFunc(s, replaceEntity)
}

func replaceEntity(s string) string {
    switch s {
    case "<":
        return "&lt;"
    case ">":
        return "&gt;"
    case "&":
        return "&amp;"
    }
    return s
}

type soapEnvelope struct {
    XMLName       xml.Name `xml:"http://schemas.xmlsoap.org/soap/envelope/ Envelope"`
    EncodingStyle string   `xml:"http://schemas.xmlsoap.org/soap/envelope/ encodingStyle,attr"`
    Body          soapBody `xml:"http://schemas.xmlsoap.org/soap/envelope/ Body"`
}

type soapBody struct {
    Fault     *SOAPFaultError `xml:"Fault"`
    RawAction []byte          `xml:",innerxml"`
}

// SOAPFaultError implements error, and contains SOAP fault information.
type SOAPFaultError struct {
    FaultCode   string `xml:"faultcode"`
    FaultString string `xml:"faultstring"`
    Detail      string `xml:"detail"`
}

func (err *SOAPFaultError) Error() string {
    return fmt.Sprintf("SOAP fault: %s", err.FaultString)
}
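Usage sketch (not part of the diff): invoking an action directly through SOAPClient.PerformAction. The endpoint URL, service URN and argument struct shapes are assumptions for illustration; PerformAction only requires that the in/out values be pointers to structs whose fields are all strings.

package main

import (
    "fmt"
    "log"
    "net/url"

    "github.com/huin/goupnp/soap"
)

// Argument structs are illustrative; all fields must be strings.
type getExternalIPArgs struct{}

type getExternalIPReply struct {
    NewExternalIPAddress string
}

func main() {
    // Assumed control endpoint of a device on the local network.
    endpoint, err := url.Parse("http://192.168.1.1:49152/upnp/control/WANIPConn1")
    if err != nil {
        log.Fatal(err)
    }
    client := soap.NewSOAPClient(*endpoint)

    var reply getExternalIPReply
    err = client.PerformAction(
        "urn:schemas-upnp-org:service:WANIPConnection:1", // action namespace (service type)
        "GetExternalIPAddress",                           // action name
        &getExternalIPArgs{},
        &reply,
    )
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("external IP:", reply.NewExternalIPAddress)
}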
528
vendor/github.com/huin/goupnp/soap/types.go
generated
vendored
Normal file
@ -0,0 +1,528 @@
package soap

import (
    "encoding/base64"
    "encoding/hex"
    "errors"
    "fmt"
    "net/url"
    "regexp"
    "strconv"
    "strings"
    "time"
    "unicode/utf8"
)

var (
    // localLoc acts like time.Local for this package, but is faked out by the
    // unit tests to ensure that things stay constant (especially when running
    // this test in a place where local time is UTC which might mask bugs).
    localLoc = time.Local
)

func MarshalUi1(v uint8) (string, error) {
    return strconv.FormatUint(uint64(v), 10), nil
}

func UnmarshalUi1(s string) (uint8, error) {
    v, err := strconv.ParseUint(s, 10, 8)
    return uint8(v), err
}

func MarshalUi2(v uint16) (string, error) {
    return strconv.FormatUint(uint64(v), 10), nil
}

func UnmarshalUi2(s string) (uint16, error) {
    v, err := strconv.ParseUint(s, 10, 16)
    return uint16(v), err
}

func MarshalUi4(v uint32) (string, error) {
    return strconv.FormatUint(uint64(v), 10), nil
}

func UnmarshalUi4(s string) (uint32, error) {
    v, err := strconv.ParseUint(s, 10, 32)
    return uint32(v), err
}

func MarshalUi8(v uint64) (string, error) {
    return strconv.FormatUint(v, 10), nil
}

func UnmarshalUi8(s string) (uint64, error) {
    v, err := strconv.ParseUint(s, 10, 64)
    return uint64(v), err
}

func MarshalI1(v int8) (string, error) {
    return strconv.FormatInt(int64(v), 10), nil
}

func UnmarshalI1(s string) (int8, error) {
    v, err := strconv.ParseInt(s, 10, 8)
    return int8(v), err
}

func MarshalI2(v int16) (string, error) {
    return strconv.FormatInt(int64(v), 10), nil
}

func UnmarshalI2(s string) (int16, error) {
    v, err := strconv.ParseInt(s, 10, 16)
    return int16(v), err
}

func MarshalI4(v int32) (string, error) {
    return strconv.FormatInt(int64(v), 10), nil
}

func UnmarshalI4(s string) (int32, error) {
    v, err := strconv.ParseInt(s, 10, 32)
    return int32(v), err
}

func MarshalInt(v int64) (string, error) {
    return strconv.FormatInt(v, 10), nil
}

func UnmarshalInt(s string) (int64, error) {
    return strconv.ParseInt(s, 10, 64)
}

func MarshalR4(v float32) (string, error) {
    return strconv.FormatFloat(float64(v), 'G', -1, 32), nil
}

func UnmarshalR4(s string) (float32, error) {
    v, err := strconv.ParseFloat(s, 32)
    return float32(v), err
}

func MarshalR8(v float64) (string, error) {
    return strconv.FormatFloat(v, 'G', -1, 64), nil
}

func UnmarshalR8(s string) (float64, error) {
    v, err := strconv.ParseFloat(s, 64)
    return float64(v), err
}

// MarshalFixed14_4 marshals float64 to SOAP "fixed.14.4" type.
func MarshalFixed14_4(v float64) (string, error) {
    if v >= 1e14 || v <= -1e14 {
        return "", fmt.Errorf("soap fixed14.4: value %v out of bounds", v)
    }
    return strconv.FormatFloat(v, 'f', 4, 64), nil
}

// UnmarshalFixed14_4 unmarshals float64 from SOAP "fixed.14.4" type.
func UnmarshalFixed14_4(s string) (float64, error) {
    v, err := strconv.ParseFloat(s, 64)
    if err != nil {
        return 0, err
    }
    if v >= 1e14 || v <= -1e14 {
        return 0, fmt.Errorf("soap fixed14.4: value %q out of bounds", s)
    }
    return v, nil
}

// MarshalChar marshals rune to SOAP "char" type.
func MarshalChar(v rune) (string, error) {
    if v == 0 {
        return "", errors.New("soap char: rune 0 is not allowed")
    }
    return string(v), nil
}

// UnmarshalChar unmarshals rune from SOAP "char" type.
func UnmarshalChar(s string) (rune, error) {
    if len(s) == 0 {
        return 0, errors.New("soap char: got empty string")
    }
    r, n := utf8.DecodeRune([]byte(s))
    if n != len(s) {
        return 0, fmt.Errorf("soap char: value %q is not a single rune", s)
    }
    return r, nil
}

func MarshalString(v string) (string, error) {
    return v, nil
}

func UnmarshalString(v string) (string, error) {
    return v, nil
}
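A quick sketch (not part of the vendored file) of the scalar marshalling helpers above; the values are arbitrary, and the printed results follow from the FormatFloat/ParseUint calls used in the code.

package main

import (
    "fmt"

    "github.com/huin/goupnp/soap"
)

func main() {
    // fixed.14.4 keeps four digits after the decimal point.
    s, err := soap.MarshalFixed14_4(1234.5)
    fmt.Println(s, err) // "1234.5000" <nil>

    f, err := soap.UnmarshalFixed14_4("0.0001")
    fmt.Println(f, err) // 0.0001 <nil>

    // ui4 values are parsed as 32-bit unsigned integers.
    u, err := soap.UnmarshalUi4("4294967295")
    fmt.Println(u, err) // 4294967295 <nil>
}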
func parseInt(s string, err *error) int {
    v, parseErr := strconv.ParseInt(s, 10, 64)
    if parseErr != nil {
        *err = parseErr
    }
    return int(v)
}

var dateRegexps = []*regexp.Regexp{
    // yyyy[-mm[-dd]]
    regexp.MustCompile(`^(\d{4})(?:-(\d{2})(?:-(\d{2}))?)?$`),
    // yyyy[mm[dd]]
    regexp.MustCompile(`^(\d{4})(?:(\d{2})(?:(\d{2}))?)?$`),
}

func parseDateParts(s string) (year, month, day int, err error) {
    var parts []string
    for _, re := range dateRegexps {
        parts = re.FindStringSubmatch(s)
        if parts != nil {
            break
        }
    }
    if parts == nil {
        err = fmt.Errorf("soap date: value %q is not in a recognized ISO8601 date format", s)
        return
    }

    year = parseInt(parts[1], &err)
    month = 1
    day = 1
    if len(parts[2]) != 0 {
        month = parseInt(parts[2], &err)
        if len(parts[3]) != 0 {
            day = parseInt(parts[3], &err)
        }
    }

    if err != nil {
        err = fmt.Errorf("soap date: %q: %v", s, err)
    }

    return
}

var timeRegexps = []*regexp.Regexp{
    // hh[:mm[:ss]]
    regexp.MustCompile(`^(\d{2})(?::(\d{2})(?::(\d{2}))?)?$`),
    // hh[mm[ss]]
    regexp.MustCompile(`^(\d{2})(?:(\d{2})(?:(\d{2}))?)?$`),
}

func parseTimeParts(s string) (hour, minute, second int, err error) {
    var parts []string
    for _, re := range timeRegexps {
        parts = re.FindStringSubmatch(s)
        if parts != nil {
            break
        }
    }
    if parts == nil {
        err = fmt.Errorf("soap time: value %q is not in ISO8601 time format", s)
        return
    }

    hour = parseInt(parts[1], &err)
    if len(parts[2]) != 0 {
        minute = parseInt(parts[2], &err)
        if len(parts[3]) != 0 {
            second = parseInt(parts[3], &err)
        }
    }

    if err != nil {
        err = fmt.Errorf("soap time: %q: %v", s, err)
    }

    return
}

// (+|-)hh[[:]mm]
var timezoneRegexp = regexp.MustCompile(`^([+-])(\d{2})(?::?(\d{2}))?$`)

func parseTimezone(s string) (offset int, err error) {
    if s == "Z" {
        return 0, nil
    }
    parts := timezoneRegexp.FindStringSubmatch(s)
    if parts == nil {
        err = fmt.Errorf("soap timezone: value %q is not in ISO8601 timezone format", s)
        return
    }

    offset = parseInt(parts[2], &err) * 3600
    if len(parts[3]) != 0 {
        offset += parseInt(parts[3], &err) * 60
    }
    if parts[1] == "-" {
        offset = -offset
    }

    if err != nil {
        err = fmt.Errorf("soap timezone: %q: %v", s, err)
    }

    return
}

var completeDateTimeZoneRegexp = regexp.MustCompile(`^([^T]+)(?:T([^-+Z]+)(.+)?)?$`)

// splitCompleteDateTimeZone splits date, time and timezone apart from an
// ISO8601 string. It does not ensure that the contents of each part are
// correct, it merely splits on certain delimiters.
// e.g "2010-09-08T12:15:10+0700" => "2010-09-08", "12:15:10", "+0700".
// Timezone can only be present if time is also present.
func splitCompleteDateTimeZone(s string) (dateStr, timeStr, zoneStr string, err error) {
    parts := completeDateTimeZoneRegexp.FindStringSubmatch(s)
    if parts == nil {
        err = fmt.Errorf("soap date/time/zone: value %q is not in ISO8601 datetime format", s)
        return
    }
    dateStr = parts[1]
    timeStr = parts[2]
    zoneStr = parts[3]
    return
}

// MarshalDate marshals time.Time to SOAP "date" type. Note that this converts
// to local time, and discards the time-of-day components.
func MarshalDate(v time.Time) (string, error) {
    return v.In(localLoc).Format("2006-01-02"), nil
}

var dateFmts = []string{"2006-01-02", "20060102"}

// UnmarshalDate unmarshals time.Time from SOAP "date" type. This outputs the
// date as midnight in the local time zone.
func UnmarshalDate(s string) (time.Time, error) {
    year, month, day, err := parseDateParts(s)
    if err != nil {
        return time.Time{}, err
    }
    return time.Date(year, time.Month(month), day, 0, 0, 0, 0, localLoc), nil
}

// TimeOfDay is used in cases where SOAP "time" or "time.tz" is used.
type TimeOfDay struct {
    // Duration of time since midnight.
    FromMidnight time.Duration

    // Set to true if Offset is specified. If false, then the timezone is
    // unspecified (and by ISO8601 - implies some "local" time).
    HasOffset bool

    // Offset is non-zero only if time.tz is used. It is otherwise ignored. If
    // non-zero, then it is regarded as a UTC offset in seconds. Note that
    // sub-minute precision is ignored by the marshal function.
    Offset int
}

// MarshalTimeOfDay marshals TimeOfDay to the "time" type.
func MarshalTimeOfDay(v TimeOfDay) (string, error) {
    d := int64(v.FromMidnight / time.Second)
    hour := d / 3600
    d = d % 3600
    minute := d / 60
    second := d % 60

    return fmt.Sprintf("%02d:%02d:%02d", hour, minute, second), nil
}

// UnmarshalTimeOfDay unmarshals TimeOfDay from the "time" type.
func UnmarshalTimeOfDay(s string) (TimeOfDay, error) {
    t, err := UnmarshalTimeOfDayTz(s)
    if err != nil {
        return TimeOfDay{}, err
    } else if t.HasOffset {
        return TimeOfDay{}, fmt.Errorf("soap time: value %q contains unexpected timezone", s)
    }
    return t, nil
}

// MarshalTimeOfDayTz marshals TimeOfDay to the "time.tz" type.
func MarshalTimeOfDayTz(v TimeOfDay) (string, error) {
    d := int64(v.FromMidnight / time.Second)
    hour := d / 3600
    d = d % 3600
    minute := d / 60
    second := d % 60

    tz := ""
    if v.HasOffset {
        if v.Offset == 0 {
            tz = "Z"
        } else {
            offsetMins := v.Offset / 60
            sign := '+'
            if offsetMins < 1 {
                offsetMins = -offsetMins
                sign = '-'
            }
            tz = fmt.Sprintf("%c%02d:%02d", sign, offsetMins/60, offsetMins%60)
        }
    }

    return fmt.Sprintf("%02d:%02d:%02d%s", hour, minute, second, tz), nil
}

// UnmarshalTimeOfDayTz unmarshals TimeOfDay from the "time.tz" type.
func UnmarshalTimeOfDayTz(s string) (tod TimeOfDay, err error) {
    zoneIndex := strings.IndexAny(s, "Z+-")
    var timePart string
    var hasOffset bool
    var offset int
    if zoneIndex == -1 {
        hasOffset = false
        timePart = s
    } else {
        hasOffset = true
        timePart = s[:zoneIndex]
        if offset, err = parseTimezone(s[zoneIndex:]); err != nil {
            return
        }
    }

    hour, minute, second, err := parseTimeParts(timePart)
    if err != nil {
        return
    }

    fromMidnight := time.Duration(hour*3600+minute*60+second) * time.Second

    // ISO8601 special case - values up to 24:00:00 are allowed, so using
    // strictly greater-than for the maximum value.
    if fromMidnight > 24*time.Hour || minute >= 60 || second >= 60 {
        return TimeOfDay{}, fmt.Errorf("soap time.tz: value %q has value(s) out of range", s)
    }

    return TimeOfDay{
        FromMidnight: time.Duration(hour*3600+minute*60+second) * time.Second,
        HasOffset:    hasOffset,
        Offset:       offset,
    }, nil
}

// MarshalDateTime marshals time.Time to SOAP "dateTime" type. Note that this
// converts to local time.
func MarshalDateTime(v time.Time) (string, error) {
    return v.In(localLoc).Format("2006-01-02T15:04:05"), nil
}

// UnmarshalDateTime unmarshals time.Time from the SOAP "dateTime" type. This
// returns a value in the local timezone.
func UnmarshalDateTime(s string) (result time.Time, err error) {
    dateStr, timeStr, zoneStr, err := splitCompleteDateTimeZone(s)
    if err != nil {
        return
    }

    if len(zoneStr) != 0 {
        err = fmt.Errorf("soap datetime: unexpected timezone in %q", s)
        return
    }

    year, month, day, err := parseDateParts(dateStr)
    if err != nil {
        return
    }

    var hour, minute, second int
    if len(timeStr) != 0 {
        hour, minute, second, err = parseTimeParts(timeStr)
        if err != nil {
            return
        }
    }

    result = time.Date(year, time.Month(month), day, hour, minute, second, 0, localLoc)
    return
}

// MarshalDateTimeTz marshals time.Time to SOAP "dateTime.tz" type.
func MarshalDateTimeTz(v time.Time) (string, error) {
    return v.Format("2006-01-02T15:04:05-07:00"), nil
}

// UnmarshalDateTimeTz unmarshals time.Time from the SOAP "dateTime.tz" type.
// This returns a value in the local timezone when the timezone is unspecified.
func UnmarshalDateTimeTz(s string) (result time.Time, err error) {
    dateStr, timeStr, zoneStr, err := splitCompleteDateTimeZone(s)
    if err != nil {
        return
    }

    year, month, day, err := parseDateParts(dateStr)
    if err != nil {
        return
    }

    var hour, minute, second int
    var location *time.Location = localLoc
    if len(timeStr) != 0 {
        hour, minute, second, err = parseTimeParts(timeStr)
        if err != nil {
            return
        }
        if len(zoneStr) != 0 {
            var offset int
            offset, err = parseTimezone(zoneStr)
            if offset == 0 {
                location = time.UTC
            } else {
                location = time.FixedZone("", offset)
            }
        }
    }

    result = time.Date(year, time.Month(month), day, hour, minute, second, 0, location)
    return
}

// MarshalBoolean marshals bool to SOAP "boolean" type.
func MarshalBoolean(v bool) (string, error) {
    if v {
        return "1", nil
    }
    return "0", nil
}

// UnmarshalBoolean unmarshals bool from the SOAP "boolean" type.
func UnmarshalBoolean(s string) (bool, error) {
    switch s {
    case "0", "false", "no":
        return false, nil
    case "1", "true", "yes":
        return true, nil
    }
    return false, fmt.Errorf("soap boolean: %q is not a valid boolean value", s)
}

// MarshalBinBase64 marshals []byte to SOAP "bin.base64" type.
func MarshalBinBase64(v []byte) (string, error) {
    return base64.StdEncoding.EncodeToString(v), nil
}

// UnmarshalBinBase64 unmarshals []byte from the SOAP "bin.base64" type.
func UnmarshalBinBase64(s string) ([]byte, error) {
    return base64.StdEncoding.DecodeString(s)
}

// MarshalBinHex marshals []byte to SOAP "bin.hex" type.
func MarshalBinHex(v []byte) (string, error) {
    return hex.EncodeToString(v), nil
}

// UnmarshalBinHex unmarshals []byte from the SOAP "bin.hex" type.
func UnmarshalBinHex(s string) ([]byte, error) {
    return hex.DecodeString(s)
}

// MarshalURI marshals *url.URL to SOAP "uri" type.
func MarshalURI(v *url.URL) (string, error) {
    return v.String(), nil
}

// UnmarshalURI unmarshals *url.URL from the SOAP "uri" type.
func UnmarshalURI(s string) (*url.URL, error) {
    return url.Parse(s)
}
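Usage sketch (not part of the diff) for the date/time helpers defined above; the chosen time-of-day and offset are arbitrary, and UnmarshalDateTime interprets zone-less values in the local timezone as documented.

package main

import (
    "fmt"
    "time"

    "github.com/huin/goupnp/soap"
)

func main() {
    // 13:30:05 with a +01:00 offset (3600 seconds east of UTC).
    tod := soap.TimeOfDay{
        FromMidnight: 13*time.Hour + 30*time.Minute + 5*time.Second,
        HasOffset:    true,
        Offset:       3600,
    }
    s, err := soap.MarshalTimeOfDayTz(tod)
    fmt.Println(s, err) // "13:30:05+01:00" <nil>

    back, err := soap.UnmarshalTimeOfDayTz(s)
    fmt.Println(back.FromMidnight, back.Offset, err) // 13h30m5s 3600 <nil>

    // A dateTime without a zone is interpreted in the local timezone.
    t, err := soap.UnmarshalDateTime("2013-10-08T10:30:50")
    fmt.Println(t, err)
}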
312
vendor/github.com/huin/goupnp/ssdp/registry.go
generated
vendored
Normal file
@ -0,0 +1,312 @@
package ssdp

import (
    "fmt"
    "log"
    "net/http"
    "net/url"
    "regexp"
    "strconv"
    "sync"
    "time"

    "github.com/huin/goupnp/httpu"
)

const (
    maxExpiryTimeSeconds = 24 * 60 * 60
)

var (
    maxAgeRx = regexp.MustCompile("max-age= *([0-9]+)")
)

const (
    EventAlive = EventType(iota)
    EventUpdate
    EventByeBye
)

type EventType int8

func (et EventType) String() string {
    switch et {
    case EventAlive:
        return "EventAlive"
    case EventUpdate:
        return "EventUpdate"
    case EventByeBye:
        return "EventByeBye"
    default:
        return fmt.Sprintf("EventUnknown(%d)", int8(et))
    }
}

type Update struct {
    // The USN of the service.
    USN string
    // What happened.
    EventType EventType
    // The entry, which is nil if the service was not known and
    // EventType==EventByeBye. The contents of this must not be modified as it
    // is shared with the registry and other listeners. Once created, the
    // Registry does not modify the Entry value - any updates are replaced with
    // a new Entry value.
    Entry *Entry
}

type Entry struct {
    // The address that the entry data was actually received from.
    RemoteAddr string
    // Unique Service Name. Identifies a unique instance of a device or service.
    USN string
    // Notification Type. The type of device or service being announced.
    NT string
    // Server's self-identifying string.
    Server string
    Host   string
    // Location of the UPnP root device description.
    Location url.URL

    // Despite BOOTID and CONFIGID being required fields, apparently they are
    // not always set by devices. Set to -1 if not present.

    BootID   int32
    ConfigID int32

    SearchPort uint16

    // When the last update was received for this entry identified by this USN.
    LastUpdate time.Time
    // When the last update's cached values are advised to expire.
    CacheExpiry time.Time
}

func newEntryFromRequest(r *http.Request) (*Entry, error) {
    now := time.Now()
    expiryDuration, err := parseCacheControlMaxAge(r.Header.Get("CACHE-CONTROL"))
    if err != nil {
        return nil, fmt.Errorf("ssdp: error parsing CACHE-CONTROL max age: %v", err)
    }

    loc, err := url.Parse(r.Header.Get("LOCATION"))
    if err != nil {
        return nil, fmt.Errorf("ssdp: error parsing entry Location URL: %v", err)
    }

    bootID, err := parseUpnpIntHeader(r.Header, "BOOTID.UPNP.ORG", -1)
    if err != nil {
        return nil, err
    }
    configID, err := parseUpnpIntHeader(r.Header, "CONFIGID.UPNP.ORG", -1)
    if err != nil {
        return nil, err
    }
    searchPort, err := parseUpnpIntHeader(r.Header, "SEARCHPORT.UPNP.ORG", ssdpSearchPort)
    if err != nil {
        return nil, err
    }

    if searchPort < 1 || searchPort > 65535 {
        return nil, fmt.Errorf("ssdp: search port %d is out of range", searchPort)
    }

    return &Entry{
        RemoteAddr:  r.RemoteAddr,
        USN:         r.Header.Get("USN"),
        NT:          r.Header.Get("NT"),
        Server:      r.Header.Get("SERVER"),
        Host:        r.Header.Get("HOST"),
        Location:    *loc,
        BootID:      bootID,
        ConfigID:    configID,
        SearchPort:  uint16(searchPort),
        LastUpdate:  now,
        CacheExpiry: now.Add(expiryDuration),
    }, nil
}

func parseCacheControlMaxAge(cc string) (time.Duration, error) {
    matches := maxAgeRx.FindStringSubmatch(cc)
    if len(matches) != 2 {
        return 0, fmt.Errorf("did not find exactly one max-age in cache control header: %q", cc)
    }
    expirySeconds, err := strconv.ParseInt(matches[1], 10, 16)
    if err != nil {
        return 0, err
    }
    if expirySeconds < 1 || expirySeconds > maxExpiryTimeSeconds {
        return 0, fmt.Errorf("rejecting bad expiry time of %d seconds", expirySeconds)
    }
    return time.Duration(expirySeconds) * time.Second, nil
}

// parseUpnpIntHeader is intended to parse the
// {BOOTID,CONFIGID,SEARCHPORT}.UPNP.ORG header fields. It returns def if
// the header is empty or missing.
func parseUpnpIntHeader(headers http.Header, headerName string, def int32) (int32, error) {
    s := headers.Get(headerName)
    if s == "" {
        return def, nil
    }
    v, err := strconv.ParseInt(s, 10, 32)
    if err != nil {
        return 0, fmt.Errorf("ssdp: could not parse header %s: %v", headerName, err)
    }
    return int32(v), nil
}

var _ httpu.Handler = new(Registry)

// Registry maintains knowledge of discovered devices and services.
//
// NOTE: the interface for this is experimental and may change, or go away
// entirely.
type Registry struct {
    lock  sync.Mutex
    byUSN map[string]*Entry

    listenersLock sync.RWMutex
    listeners     map[chan<- Update]struct{}
}

func NewRegistry() *Registry {
    return &Registry{
        byUSN:     make(map[string]*Entry),
        listeners: make(map[chan<- Update]struct{}),
    }
}

// NewServerAndRegistry is a convenience function to create a registry, and an
// httpu server to pass it messages. Call ListenAndServe on the server for
// messages to be processed.
func NewServerAndRegistry() (*httpu.Server, *Registry) {
    reg := NewRegistry()
    srv := &httpu.Server{
        Addr:      ssdpUDP4Addr,
        Multicast: true,
        Handler:   reg,
    }
    return srv, reg
}

func (reg *Registry) AddListener(c chan<- Update) {
    reg.listenersLock.Lock()
    defer reg.listenersLock.Unlock()
    reg.listeners[c] = struct{}{}
}

func (reg *Registry) RemoveListener(c chan<- Update) {
    reg.listenersLock.Lock()
    defer reg.listenersLock.Unlock()
    delete(reg.listeners, c)
}

func (reg *Registry) sendUpdate(u Update) {
    reg.listenersLock.RLock()
    defer reg.listenersLock.RUnlock()
    for c := range reg.listeners {
        c <- u
    }
}

// GetService returns known service (or device) entries for the given service
// URN.
func (reg *Registry) GetService(serviceURN string) []*Entry {
    // Currently assumes that the map is small, so we do a linear search rather
    // than indexed to avoid maintaining two maps.
    var results []*Entry
    reg.lock.Lock()
    defer reg.lock.Unlock()
    for _, entry := range reg.byUSN {
        if entry.NT == serviceURN {
            results = append(results, entry)
        }
    }
    return results
}

// ServeMessage implements httpu.Handler, and uses SSDP NOTIFY requests to
// maintain the registry of devices and services.
func (reg *Registry) ServeMessage(r *http.Request) {
    if r.Method != methodNotify {
        return
    }

    nts := r.Header.Get("nts")

    var err error
    switch nts {
    case ntsAlive:
        err = reg.handleNTSAlive(r)
    case ntsUpdate:
        err = reg.handleNTSUpdate(r)
    case ntsByebye:
        err = reg.handleNTSByebye(r)
    default:
        err = fmt.Errorf("unknown NTS value: %q", nts)
    }
    if err != nil {
        log.Printf("goupnp/ssdp: failed to handle %s message from %s: %v", nts, r.RemoteAddr, err)
    }
}

func (reg *Registry) handleNTSAlive(r *http.Request) error {
    entry, err := newEntryFromRequest(r)
    if err != nil {
        return err
    }

    reg.lock.Lock()
    reg.byUSN[entry.USN] = entry
    reg.lock.Unlock()

    reg.sendUpdate(Update{
        USN:       entry.USN,
        EventType: EventAlive,
        Entry:     entry,
    })

    return nil
}

func (reg *Registry) handleNTSUpdate(r *http.Request) error {
    entry, err := newEntryFromRequest(r)
    if err != nil {
        return err
    }
    nextBootID, err := parseUpnpIntHeader(r.Header, "NEXTBOOTID.UPNP.ORG", -1)
    if err != nil {
        return err
    }
    entry.BootID = nextBootID

    reg.lock.Lock()
    reg.byUSN[entry.USN] = entry
    reg.lock.Unlock()

    reg.sendUpdate(Update{
        USN:       entry.USN,
        EventType: EventUpdate,
        Entry:     entry,
    })

    return nil
}

func (reg *Registry) handleNTSByebye(r *http.Request) error {
    usn := r.Header.Get("USN")

    reg.lock.Lock()
    entry := reg.byUSN[usn]
    delete(reg.byUSN, usn)
    reg.lock.Unlock()

    reg.sendUpdate(Update{
        USN:       usn,
        EventType: EventByeBye,
        Entry:     entry,
    })

    return nil
}
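Usage sketch (not part of the diff): wiring a Registry to an httpu server and watching updates on a channel. This assumes httpu.Server has a blocking ListenAndServe method that returns an error (the httpu package itself is not shown in this excerpt).

package main

import (
    "log"

    "github.com/huin/goupnp/ssdp"
)

func main() {
    srv, reg := ssdp.NewServerAndRegistry()

    updates := make(chan ssdp.Update, 16)
    reg.AddListener(updates)
    defer reg.RemoveListener(updates)

    go func() {
        // Assumption: ListenAndServe blocks, feeding NOTIFY messages into the
        // registry via ServeMessage, and returns an error on failure.
        if err := srv.ListenAndServe(); err != nil {
            log.Printf("ssdp server stopped: %v", err)
        }
    }()

    for u := range updates {
        log.Printf("%s: USN=%s", u.EventType, u.USN)
    }
}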
90
vendor/github.com/huin/goupnp/ssdp/ssdp.go
generated
vendored
Normal file
@ -0,0 +1,90 @@
package ssdp

import (
    "errors"
    "log"
    "net/http"
    "net/url"
    "strconv"
    "time"

    "github.com/huin/goupnp/httpu"
)

const (
    ssdpDiscover   = `"ssdp:discover"`
    ntsAlive       = `ssdp:alive`
    ntsByebye      = `ssdp:byebye`
    ntsUpdate      = `ssdp:update`
    ssdpUDP4Addr   = "239.255.255.250:1900"
    ssdpSearchPort = 1900
    methodSearch   = "M-SEARCH"
    methodNotify   = "NOTIFY"

    // SSDPAll is a value for searchTarget that searches for all devices and services.
    SSDPAll = "ssdp:all"
    // UPNPRootDevice is a value for searchTarget that searches for all root devices.
    UPNPRootDevice = "upnp:rootdevice"
)

// SSDPRawSearch performs a fairly raw SSDP search request, and returns the
// unique response(s) that it receives. Each response has the requested
// searchTarget, a USN, and a valid location. maxWaitSeconds states how long to
// wait for responses in seconds, and must be a minimum of 1 (the
// implementation waits an additional 100ms for responses to arrive); 2 is a
// reasonable value for this. numSends is the number of requests to send - 3 is
// a reasonable value for this.
func SSDPRawSearch(httpu *httpu.HTTPUClient, searchTarget string, maxWaitSeconds int, numSends int) ([]*http.Response, error) {
    if maxWaitSeconds < 1 {
        return nil, errors.New("ssdp: maxWaitSeconds must be >= 1")
    }

    seenUsns := make(map[string]bool)
    var responses []*http.Response
    req := http.Request{
        Method: methodSearch,
        // TODO: Support both IPv4 and IPv6.
        Host: ssdpUDP4Addr,
        URL:  &url.URL{Opaque: "*"},
        Header: http.Header{
            // Putting headers in here avoids them being title-cased.
            // (The UPnP discovery protocol uses case-sensitive headers)
            "HOST": []string{ssdpUDP4Addr},
            "MX":   []string{strconv.FormatInt(int64(maxWaitSeconds), 10)},
            "MAN":  []string{ssdpDiscover},
            "ST":   []string{searchTarget},
        },
    }
    allResponses, err := httpu.Do(&req, time.Duration(maxWaitSeconds)*time.Second+100*time.Millisecond, numSends)
    if err != nil {
        return nil, err
    }

    isExactSearch := searchTarget != SSDPAll && searchTarget != UPNPRootDevice

    for _, response := range allResponses {
        if response.StatusCode != 200 {
            log.Printf("ssdp: got response status code %q in search response", response.Status)
            continue
        }
        if st := response.Header.Get("ST"); isExactSearch && st != searchTarget {
            continue
        }
        location, err := response.Location()
        if err != nil {
            log.Printf("ssdp: no usable location in search response (discarding): %v", err)
            continue
        }
        usn := response.Header.Get("USN")
        if usn == "" {
            log.Printf("ssdp: empty/missing USN in search response (using location instead): %v", err)
            usn = location.String()
        }
        if _, alreadySeen := seenUsns[usn]; !alreadySeen {
            seenUsns[usn] = true
            responses = append(responses, response)
        }
    }

    return responses, nil
}
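Usage sketch (not part of the diff): a raw root-device search. httpu.NewHTTPUClient and Close are assumed from the httpu package, which is not included in this excerpt; the wait and send counts follow the values suggested in the doc comment above.

package main

import (
    "log"

    "github.com/huin/goupnp/httpu"
    "github.com/huin/goupnp/ssdp"
)

func main() {
    // Assumption: NewHTTPUClient opens the UDP socket used for M-SEARCH.
    client, err := httpu.NewHTTPUClient()
    if err != nil {
        log.Fatal(err)
    }
    defer client.Close()

    // Wait 2 seconds for responses, sending the M-SEARCH request 3 times.
    responses, err := ssdp.SSDPRawSearch(client, ssdp.UPNPRootDevice, 2, 3)
    if err != nil {
        log.Fatal(err)
    }
    for _, r := range responses {
        log.Printf("USN=%s LOCATION=%s", r.Header.Get("USN"), r.Header.Get("LOCATION"))
    }
}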
1
vendor/github.com/ipfs/go-ipfs-util/.gitignore
generated
vendored
Normal file
@ -0,0 +1 @@
*.swp
Some files were not shown because too many files have changed in this diff.